In [1]:
import numpy as np
import pandas as pd
from packaging import version
import time

from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error as MSE
from sklearn.model_selection import train_test_split

import matplotlib.pyplot as plt
import seaborn as sns

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import models, layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, BatchNormalization, Dropout, Flatten, Dense
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.preprocessing import image
from tensorflow.keras.utils import to_categorical
import tensorflow.keras.backend as k

%matplotlib inline
np.set_printoptions(precision=3, suppress=True)

print("This notebook requires TensorFlow 2.0 or above")
print("TensorFlow version: ", tf.__version__)
assert version.parse(tf.__version__).release[0] >=2

print("Keras version: ", keras.__version__)
This notebook requires TensorFlow 2.0 or above
TensorFlow version:  2.18.0
Keras version:  3.8.0
In [2]:
# Loading the cifar10 Dataset
(train_images, train_labels), (test_images, test_labels) = keras.datasets.cifar10.load_data()
Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
170498071/170498071 ━━━━━━━━━━━━━━━━━━━━ 12s 0us/step
In [3]:
# Exploratory Data Analysis
print(f"train images shape`: {train_images.shape}")
print(f"train labels shape: {train_labels.shape}")
print(f"test images shape: {test_images.shape}")
print(f"test labels shape: {test_labels.shape}")
train images shape`: (50000, 32, 32, 3)
train labels shape: (50000, 1)
test images shape: (10000, 32, 32, 3)
test labels shape: (10000, 1)
In [4]:
# Explore the labels, labeled as a numerical digit that needs conversion
# to an item description
print(f"First 10 training labels: {train_labels[:10]}")
First 10 training labels: [[6]
 [9]
 [9]
 [4]
 [1]
 [1]
 [2]
 [7]
 [8]
 [3]]
In [5]:
# Data Analysis Functions

def show_random_examples(x, y, p):
    indices = np.random.choice(range(x.shape[0]), 10, replace=False)

    x = x[indices]
    y = y[indices]
    p = p[indices]

    plt.figure(figsize=(10, 5))
    for i in range(10):
        plt.subplot(2, 5, i + 1)
        plt.imshow(x[i])
        plt.xticks([])
        plt.yticks([])
        col = 'green' if np.argmax(y[i]) == np.argmax(p[i]) else 'red'
        plt.xlabel(class_names_preview[np.argmax(p[i])], color=col)
    plt.show()

def get_three_classes(x, y):
    def indices_of(class_id):
        indices, _ = np.where(y == float(class_id))
        return indices

    indices = np.concatenate([indices_of(0), indices_of(1), indices_of(2)], axis=0)

    x = x[indices]
    y = y[indices]

    count = x.shape[0]
    indices = np.random.choice(range(count), count, replace=False)

    x = x[indices]
    y = y[indices]

    y = tf.keras.utils.to_categorical(y)

    return x, y

def plot_history(history):
  losses = history.history['loss']
  accs = history.history['accuracy']
  val_losses = history.history['val_loss']
  val_accs = history.history['val_accuracy']
  epochs = len(losses)

  plt.figure(figsize=(16, 4))
  for i, metrics in enumerate(zip([losses, accs], [val_losses, val_accs], ['Loss', 'Accuracy'])):
    plt.subplot(1, 2, i + 1)
    plt.plot(range(epochs), metrics[0], label='Training {}'.format(metrics[2]))
    plt.plot(range(epochs), metrics[1], label='Validation {}'.format(metrics[2]))
    plt.legend()
  plt.show()

def display_training_curves(training, validation, title, subplot):
  ax = plt.subplot(subplot)
  ax.plot(training)
  ax.plot(validation)
  ax.set_title('model '+ title)
  ax.set_ylabel(title)
  ax.set_xlabel('epoch')
  ax.legend(['training', 'validation'])

def print_validation_report(y_test, predictions):
    print("Classification Report")
    print(classification_report(y_test, predictions))
    print('Accuracy Score: {}'.format(accuracy_score(y_test, predictions)))
    print('Root Mean Square Error: {}'.format(np.sqrt(MSE(y_test, predictions))))


def plot_confusion_matrix(y_true, y_pred):
    mtx = confusion_matrix(y_true, y_pred)
    fig, ax = plt.subplots(figsize=(16,12))
    sns.heatmap(mtx, annot=True, fmt='d', linewidths=.75,  cbar=False, ax=ax,cmap='Blues',linecolor='white')
    #  square=True,
    plt.ylabel('true label')
    plt.xlabel('predicted label')
In [6]:
train_image_preview, train_label_preview = get_three_classes(train_images, train_labels)
test_image_preview, test_label_preview = get_three_classes(test_images, test_labels)

class_names_preview = ['airplane', 'car', 'bird']
show_random_examples(train_image_preview, train_label_preview, train_label_preview)
No description has been provided for this image
In [7]:
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog','frog', 'horse' ,'ship' ,'truck']
In [8]:
image_train_split, image_val_split, label_train_split, label_val_split = train_test_split(train_images, train_labels, test_size=.1, random_state=42, shuffle=True)
print(image_train_split.shape)
print(image_val_split.shape)
print(label_train_split.shape)
print(label_val_split.shape)
(45000, 32, 32, 3)
(5000, 32, 32, 3)
(45000, 1)
(5000, 1)
In [9]:
image_train_norm = image_train_split / 255.0
image_val_norm = image_val_split / 255.0
image_test_norm = test_images / 255.0
image_train_norm.shape
Out[9]:
(45000, 32, 32, 3)
In [10]:
def add_to_data(data, model, history, test_pred):
  if data.get('model') is None:
    # Build initial data for table
    data['model'] = ['DNN']
    data['accuracy'] = [f"{history.history['accuracy'][-1]:.3f}"]
    data['val_accuracy'] = [f"{history.history['val_accuracy'][-1]:.3f}"]
    data['test_accuracy'] = [f"{test_pred[1]:.3f}"]
    data['loss'] = [f"{history.history['loss'][-1]:.3f}"]
    data['val_loss'] = [f"{history.history['val_loss'][-1]:.3f}"]
    data['test_loss'] = [f"{test_pred[0]:.3f}"]
    data['time'] = [f"{time_end - time_start:.3f}"]
  else:
    # Add to data table
    data['model'].append(model)
    data['accuracy'].append(f"{history.history['accuracy'][-1]:.3f}")
    data['val_accuracy'].append(f"{history.history['val_accuracy'][-1]:.3f}")
    data['test_accuracy'].append(f"{test_pred[1]:.3f}")
    data['loss'].append(f"{history.history['loss'][-1]:.3f}")
    data['val_loss'].append(f"{history.history['val_loss'][-1]:.3f}")
    data['test_loss'].append(f"{test_pred[0]:.3f}")
    data['time'].append(f"{time_end - time_start:.3f}")
In [11]:
patience_2_data = {}
for i in range(10):
  # Build Model 2-4: CNN and MaxPool with dropouts between and after
  name = f'CNN_DO_MP_DO_X3_64_128_256-Pat-2'
  k.clear_session()
  model = models.Sequential()
  model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Flatten())
  model.add(layers.Dense(units=10, activation=tf.nn.softmax))
  keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
  model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
  time_start = time.time()
  history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
                      ,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=2)])
  time_end = time.time()
  preds = model.predict(image_test_norm)
  test_pred = model.evaluate(image_test_norm, test_labels)

  history_dict = history.history
  history_df=pd.DataFrame(history_dict)
  plt.subplots(figsize=(16,12))
  plt.tight_layout()
  display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
  display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
  pred= model.predict(image_test_norm)
  pred=np.argmax(pred, axis=1)
  print_validation_report(test_labels, pred)
  plot_confusion_matrix(test_labels, pred)
  add_to_data(patience_2_data, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 20s 16ms/step - accuracy: 0.3317 - loss: 1.8061 - val_accuracy: 0.5586 - val_loss: 1.4597
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5475 - loss: 1.2703 - val_accuracy: 0.5692 - val_loss: 1.3138
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6060 - loss: 1.1128 - val_accuracy: 0.6700 - val_loss: 1.1363
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6402 - loss: 1.0197 - val_accuracy: 0.6800 - val_loss: 1.0449
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6714 - loss: 0.9414 - val_accuracy: 0.7176 - val_loss: 0.9946
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6867 - loss: 0.9054 - val_accuracy: 0.7120 - val_loss: 0.9682
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6997 - loss: 0.8572 - val_accuracy: 0.6982 - val_loss: 0.9308
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.6995 - loss: 0.9494
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.72      0.78      0.75      1000
           1       0.92      0.78      0.85      1000
           2       0.61      0.58      0.60      1000
           3       0.54      0.48      0.51      1000
           4       0.59      0.74      0.66      1000
           5       0.83      0.41      0.55      1000
           6       0.62      0.90      0.74      1000
           7       0.84      0.68      0.75      1000
           8       0.68      0.90      0.77      1000
           9       0.87      0.74      0.80      1000

    accuracy                           0.70     10000
   macro avg       0.72      0.70      0.70     10000
weighted avg       0.72      0.70      0.70     10000

Accuracy Score: 0.7011
Root Mean Square Error: 2.235978532991764
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 15s 14ms/step - accuracy: 0.3203 - loss: 1.8241 - val_accuracy: 0.5538 - val_loss: 1.5114
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5564 - loss: 1.2436 - val_accuracy: 0.6152 - val_loss: 1.2520
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6145 - loss: 1.0946 - val_accuracy: 0.6580 - val_loss: 1.1399
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6494 - loss: 1.0002 - val_accuracy: 0.6520 - val_loss: 1.0835
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6707 - loss: 0.9413 - val_accuracy: 0.7048 - val_loss: 1.0304
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6865 - loss: 0.9033 - val_accuracy: 0.7040 - val_loss: 0.9412
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6992 - loss: 0.8643 - val_accuracy: 0.7020 - val_loss: 0.9086
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7045 - loss: 0.9282
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.83      0.68      0.75      1000
           1       0.83      0.88      0.86      1000
           2       0.62      0.57      0.60      1000
           3       0.51      0.52      0.51      1000
           4       0.64      0.68      0.66      1000
           5       0.66      0.56      0.61      1000
           6       0.55      0.93      0.69      1000
           7       0.90      0.62      0.73      1000
           8       0.78      0.87      0.82      1000
           9       0.90      0.71      0.80      1000

    accuracy                           0.70     10000
   macro avg       0.72      0.70      0.70     10000
weighted avg       0.72      0.70      0.70     10000

Accuracy Score: 0.7017
Root Mean Square Error: 2.16545607205503
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 13ms/step - accuracy: 0.3370 - loss: 1.7901 - val_accuracy: 0.5174 - val_loss: 1.5246
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 6ms/step - accuracy: 0.5489 - loss: 1.2736 - val_accuracy: 0.6124 - val_loss: 1.2603
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6075 - loss: 1.1145 - val_accuracy: 0.6488 - val_loss: 1.1428
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6416 - loss: 1.0200 - val_accuracy: 0.6766 - val_loss: 1.0856
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6664 - loss: 0.9518 - val_accuracy: 0.6940 - val_loss: 0.9852
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6915 - loss: 0.8906 - val_accuracy: 0.7160 - val_loss: 0.9505
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7038 - loss: 0.8502 - val_accuracy: 0.7044 - val_loss: 0.9510
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7085 - loss: 0.8388 - val_accuracy: 0.7128 - val_loss: 0.9102
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7108 - loss: 0.9241
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.89      0.62      0.73      1000
           1       0.89      0.79      0.84      1000
           2       0.60      0.57      0.59      1000
           3       0.53      0.57      0.55      1000
           4       0.57      0.79      0.66      1000
           5       0.71      0.58      0.64      1000
           6       0.63      0.90      0.74      1000
           7       0.86      0.65      0.74      1000
           8       0.73      0.90      0.81      1000
           9       0.91      0.70      0.79      1000

    accuracy                           0.71     10000
   macro avg       0.73      0.71      0.71     10000
weighted avg       0.73      0.71      0.71     10000

Accuracy Score: 0.707
Root Mean Square Error: 2.1589117629027825
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 14ms/step - accuracy: 0.3101 - loss: 1.8537 - val_accuracy: 0.5188 - val_loss: 1.4791
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 6ms/step - accuracy: 0.5323 - loss: 1.3077 - val_accuracy: 0.6056 - val_loss: 1.3059
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6063 - loss: 1.1294 - val_accuracy: 0.6784 - val_loss: 1.1021
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6397 - loss: 1.0344 - val_accuracy: 0.6648 - val_loss: 1.0728
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6615 - loss: 0.9678 - val_accuracy: 0.6150 - val_loss: 1.1301
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.6240 - loss: 1.1471
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.86      0.52      0.65      1000
           1       0.90      0.75      0.82      1000
           2       0.51      0.51      0.51      1000
           3       0.42      0.53      0.47      1000
           4       0.44      0.77      0.56      1000
           5       0.71      0.36      0.48      1000
           6       0.50      0.91      0.64      1000
           7       0.88      0.50      0.64      1000
           8       0.82      0.75      0.78      1000
           9       0.88      0.66      0.75      1000

    accuracy                           0.63     10000
   macro avg       0.69      0.63      0.63     10000
weighted avg       0.69      0.63      0.63     10000

Accuracy Score: 0.6252
Root Mean Square Error: 2.269603489599009
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 14ms/step - accuracy: 0.3331 - loss: 1.8102 - val_accuracy: 0.5382 - val_loss: 1.4662
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 6ms/step - accuracy: 0.5405 - loss: 1.2844 - val_accuracy: 0.5790 - val_loss: 1.3714
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5957 - loss: 1.1427 - val_accuracy: 0.6064 - val_loss: 1.2018
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6309 - loss: 1.0397 - val_accuracy: 0.6730 - val_loss: 1.0886
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6655 - loss: 0.9654 - val_accuracy: 0.6914 - val_loss: 1.0041
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6834 - loss: 0.9140 - val_accuracy: 0.7028 - val_loss: 0.9811
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6951 - loss: 0.8735 - val_accuracy: 0.7218 - val_loss: 0.9274
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7069 - loss: 0.8401 - val_accuracy: 0.7272 - val_loss: 0.9089
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7117 - loss: 0.8278 - val_accuracy: 0.7358 - val_loss: 0.8768
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7275 - loss: 0.7901 - val_accuracy: 0.7312 - val_loss: 0.8618
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7300 - loss: 0.7689 - val_accuracy: 0.7468 - val_loss: 0.8292
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7354 - loss: 0.7543 - val_accuracy: 0.7220 - val_loss: 0.8483
Epoch 13/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7366 - loss: 0.7466 - val_accuracy: 0.7242 - val_loss: 0.8259
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7235 - loss: 0.8393
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.86      0.70      0.77      1000
           1       0.94      0.76      0.84      1000
           2       0.67      0.58      0.62      1000
           3       0.56      0.56      0.56      1000
           4       0.57      0.82      0.67      1000
           5       0.72      0.58      0.64      1000
           6       0.59      0.91      0.72      1000
           7       0.90      0.65      0.75      1000
           8       0.81      0.86      0.84      1000
           9       0.85      0.79      0.82      1000

    accuracy                           0.72     10000
   macro avg       0.75      0.72      0.72     10000
weighted avg       0.75      0.72      0.72     10000

Accuracy Score: 0.7218
Root Mean Square Error: 2.056963781888247
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 13ms/step - accuracy: 0.3264 - loss: 1.8111 - val_accuracy: 0.5614 - val_loss: 1.4348
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 6ms/step - accuracy: 0.5406 - loss: 1.2911 - val_accuracy: 0.6260 - val_loss: 1.2408
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6106 - loss: 1.1091 - val_accuracy: 0.6598 - val_loss: 1.1186
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6396 - loss: 1.0352 - val_accuracy: 0.6574 - val_loss: 1.0729
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6669 - loss: 0.9550 - val_accuracy: 0.6902 - val_loss: 1.0075
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6826 - loss: 0.9072 - val_accuracy: 0.7040 - val_loss: 0.9527
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6950 - loss: 0.8666 - val_accuracy: 0.7110 - val_loss: 0.9289
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7072 - loss: 0.8387 - val_accuracy: 0.6698 - val_loss: 0.9682
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7151 - loss: 0.8159 - val_accuracy: 0.7304 - val_loss: 0.8651
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7261 - loss: 0.7885 - val_accuracy: 0.6972 - val_loss: 0.8964
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7274 - loss: 0.7743 - val_accuracy: 0.7288 - val_loss: 0.8427
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7300 - loss: 0.8478
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.76      0.80      0.78      1000
           1       0.88      0.85      0.87      1000
           2       0.74      0.54      0.62      1000
           3       0.49      0.65      0.56      1000
           4       0.73      0.64      0.68      1000
           5       0.67      0.66      0.66      1000
           6       0.62      0.91      0.74      1000
           7       0.92      0.61      0.73      1000
           8       0.77      0.88      0.82      1000
           9       0.92      0.73      0.82      1000

    accuracy                           0.73     10000
   macro avg       0.75      0.73      0.73     10000
weighted avg       0.75      0.73      0.73     10000

Accuracy Score: 0.7264
Root Mean Square Error: 2.0694685308068834
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 17s 15ms/step - accuracy: 0.3391 - loss: 1.8048 - val_accuracy: 0.5430 - val_loss: 1.4296
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 7ms/step - accuracy: 0.5346 - loss: 1.2992 - val_accuracy: 0.5956 - val_loss: 1.2888
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.5945 - loss: 1.1428 - val_accuracy: 0.6092 - val_loss: 1.2322
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6372 - loss: 1.0379 - val_accuracy: 0.6696 - val_loss: 1.0721
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6574 - loss: 0.9848 - val_accuracy: 0.6870 - val_loss: 1.0177
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6798 - loss: 0.9237 - val_accuracy: 0.7096 - val_loss: 0.9592
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6878 - loss: 0.8945 - val_accuracy: 0.7114 - val_loss: 0.9243
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7019 - loss: 0.8566 - val_accuracy: 0.7254 - val_loss: 0.8873
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7098 - loss: 0.8274 - val_accuracy: 0.7050 - val_loss: 0.8916
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7194 - loss: 0.8087 - val_accuracy: 0.7272 - val_loss: 0.8480
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7267 - loss: 0.7793 - val_accuracy: 0.7262 - val_loss: 0.8827
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7317 - loss: 0.7657 - val_accuracy: 0.7256 - val_loss: 0.8289
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7221 - loss: 0.8522
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.80      0.78      0.79      1000
           1       0.94      0.80      0.87      1000
           2       0.68      0.57      0.62      1000
           3       0.62      0.46      0.53      1000
           4       0.51      0.84      0.64      1000
           5       0.83      0.43      0.57      1000
           6       0.60      0.92      0.73      1000
           7       0.84      0.70      0.77      1000
           8       0.81      0.88      0.85      1000
           9       0.84      0.82      0.83      1000

    accuracy                           0.72     10000
   macro avg       0.75      0.72      0.72     10000
weighted avg       0.75      0.72      0.72     10000

Accuracy Score: 0.7207
Root Mean Square Error: 2.041421073664128
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 13ms/step - accuracy: 0.3326 - loss: 1.8119 - val_accuracy: 0.5334 - val_loss: 1.4843
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 6ms/step - accuracy: 0.5444 - loss: 1.2912 - val_accuracy: 0.6228 - val_loss: 1.2590
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6119 - loss: 1.1124 - val_accuracy: 0.6460 - val_loss: 1.1577
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6380 - loss: 1.0307 - val_accuracy: 0.6704 - val_loss: 1.0816
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6603 - loss: 0.9759 - val_accuracy: 0.6972 - val_loss: 1.0393
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6801 - loss: 0.9234 - val_accuracy: 0.6760 - val_loss: 1.0217
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6899 - loss: 0.8835 - val_accuracy: 0.6896 - val_loss: 0.9874
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.6894 - loss: 1.0046
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.79      0.72      0.76      1000
           1       0.92      0.74      0.82      1000
           2       0.63      0.52      0.57      1000
           3       0.52      0.50      0.51      1000
           4       0.52      0.78      0.63      1000
           5       0.61      0.64      0.62      1000
           6       0.64      0.86      0.73      1000
           7       0.86      0.58      0.69      1000
           8       0.74      0.87      0.80      1000
           9       0.88      0.67      0.76      1000

    accuracy                           0.69     10000
   macro avg       0.71      0.69      0.69     10000
weighted avg       0.71      0.69      0.69     10000

Accuracy Score: 0.6882
Root Mean Square Error: 2.204563448848774
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 15s 14ms/step - accuracy: 0.3201 - loss: 1.8348 - val_accuracy: 0.5496 - val_loss: 1.4763
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 6ms/step - accuracy: 0.5436 - loss: 1.2884 - val_accuracy: 0.5942 - val_loss: 1.3012
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5944 - loss: 1.1470 - val_accuracy: 0.6504 - val_loss: 1.1920
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6334 - loss: 1.0456 - val_accuracy: 0.7000 - val_loss: 1.0362
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6574 - loss: 0.9833 - val_accuracy: 0.6866 - val_loss: 1.0053
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6781 - loss: 0.9216 - val_accuracy: 0.6932 - val_loss: 0.9547
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.6964 - loss: 0.9720
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.77      0.78      0.78      1000
           1       0.78      0.88      0.83      1000
           2       0.59      0.55      0.57      1000
           3       0.55      0.43      0.48      1000
           4       0.65      0.63      0.64      1000
           5       0.71      0.52      0.60      1000
           6       0.52      0.94      0.67      1000
           7       0.84      0.68      0.75      1000
           8       0.80      0.84      0.82      1000
           9       0.88      0.69      0.78      1000

    accuracy                           0.69     10000
   macro avg       0.71      0.69      0.69     10000
weighted avg       0.71      0.69      0.69     10000

Accuracy Score: 0.6939
Root Mean Square Error: 2.25302019520465
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 15ms/step - accuracy: 0.3300 - loss: 1.8107 - val_accuracy: 0.5450 - val_loss: 1.4558
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5424 - loss: 1.2795 - val_accuracy: 0.5940 - val_loss: 1.2602
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6020 - loss: 1.1244 - val_accuracy: 0.6574 - val_loss: 1.1244
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6440 - loss: 1.0171 - val_accuracy: 0.6422 - val_loss: 1.0840
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6641 - loss: 0.9612 - val_accuracy: 0.6906 - val_loss: 1.0351
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6869 - loss: 0.8998 - val_accuracy: 0.6978 - val_loss: 0.9534
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6907 - loss: 0.8810 - val_accuracy: 0.7096 - val_loss: 0.9500
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7017 - loss: 0.8521 - val_accuracy: 0.7300 - val_loss: 0.8789
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7129 - loss: 0.8206 - val_accuracy: 0.7234 - val_loss: 0.8940
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7220 - loss: 0.7956 - val_accuracy: 0.7386 - val_loss: 0.8319
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7286 - loss: 0.7709 - val_accuracy: 0.7094 - val_loss: 0.8862
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7277 - loss: 0.7659 - val_accuracy: 0.7128 - val_loss: 0.8681
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7191 - loss: 0.8841
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.85      0.70      0.77      1000
           1       0.87      0.87      0.87      1000
           2       0.57      0.63      0.60      1000
           3       0.59      0.50      0.54      1000
           4       0.62      0.73      0.67      1000
           5       0.72      0.56      0.63      1000
           6       0.56      0.92      0.70      1000
           7       0.94      0.58      0.71      1000
           8       0.80      0.87      0.83      1000
           9       0.88      0.80      0.84      1000

    accuracy                           0.72     10000
   macro avg       0.74      0.72      0.72     10000
weighted avg       0.74      0.72      0.72     10000

Accuracy Score: 0.7169
Root Mean Square Error: 2.087893675453805
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
In [12]:
patience_data_3 = {}
for i in range(10):
  # Build Model 2-4: CNN and MaxPool with dropouts between and after
  name = f'CNN_DO_MP_DO_X3_64_128_256-Pat-3'
  k.clear_session()
  model = models.Sequential()
  model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Flatten())
  model.add(layers.Dense(units=10, activation=tf.nn.softmax))
  keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
  model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
  time_start = time.time()
  history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
                      ,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=3)])
  time_end = time.time()
  preds = model.predict(image_test_norm)
  test_pred = model.evaluate(image_test_norm, test_labels)

  history_dict = history.history
  history_df=pd.DataFrame(history_dict)
  plt.subplots(figsize=(16,12))
  plt.tight_layout()
  display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
  display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
  pred= model.predict(image_test_norm)
  pred=np.argmax(pred, axis=1)
  print_validation_report(test_labels, pred)
  plot_confusion_matrix(test_labels, pred)
  add_to_data(patience_data_3, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 14ms/step - accuracy: 0.3158 - loss: 1.8512 - val_accuracy: 0.5100 - val_loss: 1.5046
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 6ms/step - accuracy: 0.5420 - loss: 1.2935 - val_accuracy: 0.5822 - val_loss: 1.2977
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6064 - loss: 1.1250 - val_accuracy: 0.6432 - val_loss: 1.1796
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6388 - loss: 1.0367 - val_accuracy: 0.6674 - val_loss: 1.0615
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6589 - loss: 0.9775 - val_accuracy: 0.6710 - val_loss: 1.0354
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6730 - loss: 0.9328 - val_accuracy: 0.7062 - val_loss: 0.9595
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6881 - loss: 0.8983 - val_accuracy: 0.7082 - val_loss: 0.9222
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7044 - loss: 0.8540 - val_accuracy: 0.6898 - val_loss: 0.9611
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7070 - loss: 0.8327 - val_accuracy: 0.7210 - val_loss: 0.8719
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7210 - loss: 0.7991 - val_accuracy: 0.7302 - val_loss: 0.8670
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 6ms/step - accuracy: 0.7272 - loss: 0.7852 - val_accuracy: 0.6980 - val_loss: 0.9131
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7303 - loss: 0.7722 - val_accuracy: 0.7498 - val_loss: 0.8001
Epoch 13/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7318 - loss: 0.7642 - val_accuracy: 0.7372 - val_loss: 0.8209
Epoch 14/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7375 - loss: 0.7467 - val_accuracy: 0.7178 - val_loss: 0.8360
Epoch 15/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7468 - loss: 0.7323 - val_accuracy: 0.7650 - val_loss: 0.7665
Epoch 16/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7498 - loss: 0.7232 - val_accuracy: 0.7208 - val_loss: 0.8215
Epoch 17/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7503 - loss: 0.7101 - val_accuracy: 0.7410 - val_loss: 0.7752
Epoch 18/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7573 - loss: 0.6958 - val_accuracy: 0.7304 - val_loss: 0.7971
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7256 - loss: 0.8151
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.79      0.80      0.79      1000
           1       0.93      0.83      0.88      1000
           2       0.73      0.54      0.62      1000
           3       0.59      0.53      0.56      1000
           4       0.54      0.84      0.66      1000
           5       0.75      0.57      0.65      1000
           6       0.59      0.92      0.72      1000
           7       0.93      0.58      0.72      1000
           8       0.79      0.89      0.84      1000
           9       0.91      0.78      0.84      1000

    accuracy                           0.73     10000
   macro avg       0.76      0.73      0.73     10000
weighted avg       0.76      0.73      0.73     10000

Accuracy Score: 0.7271
Root Mean Square Error: 2.015986111063268
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 15ms/step - accuracy: 0.3300 - loss: 1.8189 - val_accuracy: 0.5120 - val_loss: 1.5321
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 7ms/step - accuracy: 0.5298 - loss: 1.3186 - val_accuracy: 0.6072 - val_loss: 1.2917
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5929 - loss: 1.1546 - val_accuracy: 0.6224 - val_loss: 1.1517
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6399 - loss: 1.0339 - val_accuracy: 0.6640 - val_loss: 1.0965
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6589 - loss: 0.9842 - val_accuracy: 0.7082 - val_loss: 0.9919
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6750 - loss: 0.9271 - val_accuracy: 0.7072 - val_loss: 0.9863
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6901 - loss: 0.8878 - val_accuracy: 0.6948 - val_loss: 1.0074
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6980 - loss: 0.8626 - val_accuracy: 0.7214 - val_loss: 0.9021
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7093 - loss: 0.8300 - val_accuracy: 0.7048 - val_loss: 0.9029
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7149 - loss: 0.8146 - val_accuracy: 0.7276 - val_loss: 0.9016
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7248 - loss: 0.7898 - val_accuracy: 0.7190 - val_loss: 0.8711
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7257 - loss: 0.7746 - val_accuracy: 0.7234 - val_loss: 0.8428
Epoch 13/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7353 - loss: 0.7558 - val_accuracy: 0.7312 - val_loss: 0.8350
Epoch 14/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7312 - loss: 0.7581 - val_accuracy: 0.7264 - val_loss: 0.8333
Epoch 15/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7414 - loss: 0.7385 - val_accuracy: 0.7180 - val_loss: 0.8365
Epoch 16/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7466 - loss: 0.7189 - val_accuracy: 0.7664 - val_loss: 0.7568
Epoch 17/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7474 - loss: 0.7201 - val_accuracy: 0.7554 - val_loss: 0.7545
Epoch 18/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7505 - loss: 0.7055 - val_accuracy: 0.7338 - val_loss: 0.7965
Epoch 19/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7525 - loss: 0.6988 - val_accuracy: 0.7642 - val_loss: 0.7191
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7697 - loss: 0.7375
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.85      0.76      0.80      1000
           1       0.96      0.82      0.89      1000
           2       0.75      0.58      0.65      1000
           3       0.64      0.55      0.59      1000
           4       0.65      0.81      0.72      1000
           5       0.65      0.73      0.69      1000
           6       0.75      0.88      0.81      1000
           7       0.86      0.75      0.80      1000
           8       0.79      0.91      0.85      1000
           9       0.84      0.86      0.85      1000

    accuracy                           0.77     10000
   macro avg       0.77      0.77      0.77     10000
weighted avg       0.77      0.77      0.77     10000

Accuracy Score: 0.7664
Root Mean Square Error: 1.9364658530425989
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 14ms/step - accuracy: 0.3266 - loss: 1.8119 - val_accuracy: 0.5526 - val_loss: 1.4467
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5444 - loss: 1.2831 - val_accuracy: 0.5712 - val_loss: 1.3277
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6005 - loss: 1.1369 - val_accuracy: 0.6412 - val_loss: 1.1584
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.6367 - loss: 1.0425 - val_accuracy: 0.6694 - val_loss: 1.0721
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6649 - loss: 0.9581 - val_accuracy: 0.6902 - val_loss: 0.9966
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6825 - loss: 0.9198 - val_accuracy: 0.7020 - val_loss: 0.9863
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6948 - loss: 0.8775 - val_accuracy: 0.7052 - val_loss: 0.9349
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7052 - loss: 0.8432 - val_accuracy: 0.6912 - val_loss: 0.9241
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7133 - loss: 0.8235 - val_accuracy: 0.7220 - val_loss: 0.8921
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7241 - loss: 0.7895 - val_accuracy: 0.6996 - val_loss: 0.8782
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7259 - loss: 0.7790 - val_accuracy: 0.7218 - val_loss: 0.8522
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7387 - loss: 0.7491 - val_accuracy: 0.7480 - val_loss: 0.8124
Epoch 13/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7432 - loss: 0.7371 - val_accuracy: 0.7258 - val_loss: 0.8310
Epoch 14/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7433 - loss: 0.7233 - val_accuracy: 0.7242 - val_loss: 0.8390
Epoch 15/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7529 - loss: 0.7109 - val_accuracy: 0.7470 - val_loss: 0.7806
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7440 - loss: 0.7954
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.80      0.74      0.77      1000
           1       0.92      0.84      0.88      1000
           2       0.69      0.61      0.65      1000
           3       0.56      0.60      0.58      1000
           4       0.66      0.75      0.71      1000
           5       0.72      0.60      0.65      1000
           6       0.65      0.91      0.76      1000
           7       0.91      0.67      0.77      1000
           8       0.76      0.92      0.83      1000
           9       0.89      0.79      0.84      1000

    accuracy                           0.74     10000
   macro avg       0.76      0.74      0.74     10000
weighted avg       0.76      0.74      0.74     10000

Accuracy Score: 0.7435
Root Mean Square Error: 2.0588589072590673
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 15ms/step - accuracy: 0.3328 - loss: 1.8057 - val_accuracy: 0.5494 - val_loss: 1.4594
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.5476 - loss: 1.2800 - val_accuracy: 0.5998 - val_loss: 1.2704
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6039 - loss: 1.1325 - val_accuracy: 0.6464 - val_loss: 1.1484
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6356 - loss: 1.0456 - val_accuracy: 0.6612 - val_loss: 1.0967
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6689 - loss: 0.9440 - val_accuracy: 0.6914 - val_loss: 1.0197
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6852 - loss: 0.9067 - val_accuracy: 0.7118 - val_loss: 0.9628
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6952 - loss: 0.8753 - val_accuracy: 0.7148 - val_loss: 0.9445
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7040 - loss: 0.8546 - val_accuracy: 0.7330 - val_loss: 0.8798
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7125 - loss: 0.8213 - val_accuracy: 0.7110 - val_loss: 0.8811
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7259 - loss: 0.7907 - val_accuracy: 0.7066 - val_loss: 0.9003
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7236 - loss: 0.7875 - val_accuracy: 0.7542 - val_loss: 0.7752
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7370 - loss: 0.7531 - val_accuracy: 0.7186 - val_loss: 0.8488
Epoch 13/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7409 - loss: 0.7374 - val_accuracy: 0.7350 - val_loss: 0.8108
Epoch 14/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7473 - loss: 0.7262 - val_accuracy: 0.7460 - val_loss: 0.7908
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7427 - loss: 0.8022
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.80      0.78      0.79      1000
           1       0.91      0.85      0.88      1000
           2       0.69      0.57      0.62      1000
           3       0.62      0.47      0.54      1000
           4       0.58      0.82      0.68      1000
           5       0.82      0.49      0.62      1000
           6       0.63      0.91      0.74      1000
           7       0.84      0.74      0.79      1000
           8       0.79      0.90      0.84      1000
           9       0.84      0.85      0.85      1000

    accuracy                           0.74     10000
   macro avg       0.75      0.74      0.73     10000
weighted avg       0.75      0.74      0.73     10000

Accuracy Score: 0.7391
Root Mean Square Error: 2.0298275788844724
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 15ms/step - accuracy: 0.3349 - loss: 1.8101 - val_accuracy: 0.5324 - val_loss: 1.4906
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.5418 - loss: 1.2909 - val_accuracy: 0.6142 - val_loss: 1.2801
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6075 - loss: 1.1194 - val_accuracy: 0.6542 - val_loss: 1.1731
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6391 - loss: 1.0207 - val_accuracy: 0.6520 - val_loss: 1.1112
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6636 - loss: 0.9633 - val_accuracy: 0.6730 - val_loss: 1.0712
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6765 - loss: 0.9184 - val_accuracy: 0.7048 - val_loss: 0.9812
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6971 - loss: 0.8707 - val_accuracy: 0.7316 - val_loss: 0.9021
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7153 - loss: 0.8293 - val_accuracy: 0.7050 - val_loss: 0.9235
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7167 - loss: 0.8091 - val_accuracy: 0.7252 - val_loss: 0.8836
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7267 - loss: 0.7828 - val_accuracy: 0.6996 - val_loss: 0.8898
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.7009 - loss: 0.9115
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.85      0.64      0.73      1000
           1       0.96      0.75      0.84      1000
           2       0.66      0.52      0.58      1000
           3       0.57      0.49      0.53      1000
           4       0.50      0.85      0.62      1000
           5       0.74      0.53      0.62      1000
           6       0.57      0.92      0.71      1000
           7       0.91      0.60      0.73      1000
           8       0.77      0.88      0.82      1000
           9       0.82      0.82      0.82      1000

    accuracy                           0.70     10000
   macro avg       0.74      0.70      0.70     10000
weighted avg       0.74      0.70      0.70     10000

Accuracy Score: 0.6993
Root Mean Square Error: 2.163307652646752
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 15s 14ms/step - accuracy: 0.3186 - loss: 1.8410 - val_accuracy: 0.5162 - val_loss: 1.4812
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 6ms/step - accuracy: 0.5351 - loss: 1.3061 - val_accuracy: 0.5830 - val_loss: 1.3037
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6003 - loss: 1.1335 - val_accuracy: 0.6602 - val_loss: 1.1361
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6338 - loss: 1.0423 - val_accuracy: 0.6576 - val_loss: 1.1461
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6616 - loss: 0.9723 - val_accuracy: 0.6034 - val_loss: 1.1686
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.6738 - loss: 0.9349 - val_accuracy: 0.6894 - val_loss: 1.0362
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6926 - loss: 0.8892 - val_accuracy: 0.7160 - val_loss: 0.9285
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7004 - loss: 0.8518 - val_accuracy: 0.7016 - val_loss: 0.9595
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7136 - loss: 0.8272 - val_accuracy: 0.6920 - val_loss: 0.9638
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7167 - loss: 0.8093 - val_accuracy: 0.7072 - val_loss: 0.9015
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7042 - loss: 0.9165
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.79      0.72      0.75      1000
           1       0.83      0.89      0.86      1000
           2       0.67      0.52      0.58      1000
           3       0.55      0.45      0.49      1000
           4       0.59      0.73      0.66      1000
           5       0.77      0.43      0.56      1000
           6       0.52      0.93      0.67      1000
           7       0.88      0.67      0.76      1000
           8       0.75      0.89      0.82      1000
           9       0.87      0.75      0.81      1000

    accuracy                           0.70     10000
   macro avg       0.72      0.70      0.70     10000
weighted avg       0.72      0.70      0.70     10000

Accuracy Score: 0.6997
Root Mean Square Error: 2.1948348457230216
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 15s 13ms/step - accuracy: 0.3200 - loss: 1.8274 - val_accuracy: 0.5068 - val_loss: 1.5146
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 12s 7ms/step - accuracy: 0.5394 - loss: 1.2929 - val_accuracy: 0.5846 - val_loss: 1.2844
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6042 - loss: 1.1256 - val_accuracy: 0.6472 - val_loss: 1.1681
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6386 - loss: 1.0291 - val_accuracy: 0.6850 - val_loss: 1.0585
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6647 - loss: 0.9635 - val_accuracy: 0.6854 - val_loss: 1.0040
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.6855 - loss: 0.9059 - val_accuracy: 0.6844 - val_loss: 0.9965
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6948 - loss: 0.8735 - val_accuracy: 0.7092 - val_loss: 0.9374
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7064 - loss: 0.8453 - val_accuracy: 0.7230 - val_loss: 0.8945
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7150 - loss: 0.8186 - val_accuracy: 0.7046 - val_loss: 0.8985
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7179 - loss: 0.7966 - val_accuracy: 0.7370 - val_loss: 0.8593
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7294 - loss: 0.7711 - val_accuracy: 0.7446 - val_loss: 0.7902
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7367 - loss: 0.7561 - val_accuracy: 0.7028 - val_loss: 0.8939
Epoch 13/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7411 - loss: 0.7403 - val_accuracy: 0.7316 - val_loss: 0.8070
Epoch 14/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7484 - loss: 0.7160 - val_accuracy: 0.7472 - val_loss: 0.7648
Epoch 15/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7504 - loss: 0.7143 - val_accuracy: 0.7056 - val_loss: 0.8523
Epoch 16/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7525 - loss: 0.7081 - val_accuracy: 0.7528 - val_loss: 0.7468
Epoch 17/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7626 - loss: 0.6860 - val_accuracy: 0.7382 - val_loss: 0.7827
Epoch 18/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7581 - loss: 0.6879 - val_accuracy: 0.7492 - val_loss: 0.7856
Epoch 19/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7579 - loss: 0.6864 - val_accuracy: 0.7506 - val_loss: 0.7608
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7473 - loss: 0.7809
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.85      0.72      0.78      1000
           1       0.96      0.78      0.86      1000
           2       0.79      0.50      0.61      1000
           3       0.52      0.68      0.59      1000
           4       0.60      0.84      0.70      1000
           5       0.64      0.70      0.67      1000
           6       0.79      0.81      0.80      1000
           7       0.89      0.69      0.78      1000
           8       0.81      0.89      0.85      1000
           9       0.83      0.85      0.84      1000

    accuracy                           0.75     10000
   macro avg       0.77      0.75      0.75     10000
weighted avg       0.77      0.75      0.75     10000

Accuracy Score: 0.7452
Root Mean Square Error: 1.9999249985936973
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 14ms/step - accuracy: 0.3242 - loss: 1.8216 - val_accuracy: 0.5502 - val_loss: 1.4721
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.5435 - loss: 1.2852 - val_accuracy: 0.6092 - val_loss: 1.2851
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6025 - loss: 1.1296 - val_accuracy: 0.6508 - val_loss: 1.1008
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.6454 - loss: 1.0168 - val_accuracy: 0.6740 - val_loss: 1.0947
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6670 - loss: 0.9606 - val_accuracy: 0.7114 - val_loss: 0.9649
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6816 - loss: 0.9172 - val_accuracy: 0.6822 - val_loss: 0.9672
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6954 - loss: 0.8782 - val_accuracy: 0.7136 - val_loss: 0.9209
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7052 - loss: 0.8408 - val_accuracy: 0.7176 - val_loss: 0.9120
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7205 - loss: 0.8108 - val_accuracy: 0.6924 - val_loss: 0.9331
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7243 - loss: 0.7900 - val_accuracy: 0.7238 - val_loss: 0.8362
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7269 - loss: 0.7816 - val_accuracy: 0.7518 - val_loss: 0.7976
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7388 - loss: 0.7438 - val_accuracy: 0.6996 - val_loss: 0.8673
Epoch 13/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7366 - loss: 0.7472 - val_accuracy: 0.7484 - val_loss: 0.8118
Epoch 14/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7492 - loss: 0.7245 - val_accuracy: 0.7504 - val_loss: 0.7826
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7440 - loss: 0.8125
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.85      0.72      0.78      1000
           1       0.87      0.88      0.87      1000
           2       0.61      0.67      0.64      1000
           3       0.52      0.66      0.58      1000
           4       0.72      0.69      0.70      1000
           5       0.71      0.55      0.62      1000
           6       0.70      0.89      0.78      1000
           7       0.91      0.67      0.77      1000
           8       0.75      0.92      0.83      1000
           9       0.91      0.73      0.81      1000

    accuracy                           0.74     10000
   macro avg       0.76      0.74      0.74     10000
weighted avg       0.76      0.74      0.74     10000

Accuracy Score: 0.7385
Root Mean Square Error: 2.05895604615543
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 17s 14ms/step - accuracy: 0.3157 - loss: 1.8472 - val_accuracy: 0.5456 - val_loss: 1.4935
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5408 - loss: 1.2956 - val_accuracy: 0.6158 - val_loss: 1.3026
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.5991 - loss: 1.1413 - val_accuracy: 0.6084 - val_loss: 1.2159
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6315 - loss: 1.0485 - val_accuracy: 0.6744 - val_loss: 1.0757
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6517 - loss: 0.9929 - val_accuracy: 0.6678 - val_loss: 1.0461
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6782 - loss: 0.9273 - val_accuracy: 0.6700 - val_loss: 1.0152
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.6844 - loss: 0.9042 - val_accuracy: 0.7154 - val_loss: 0.9428
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7036 - loss: 0.8534 - val_accuracy: 0.7246 - val_loss: 0.9213
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7087 - loss: 0.8373 - val_accuracy: 0.7240 - val_loss: 0.8904
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7227 - loss: 0.7981 - val_accuracy: 0.7002 - val_loss: 0.9147
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7199 - loss: 0.7968 - val_accuracy: 0.7264 - val_loss: 0.8845
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7282 - loss: 0.7731 - val_accuracy: 0.7412 - val_loss: 0.8240
Epoch 13/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7360 - loss: 0.7558 - val_accuracy: 0.7364 - val_loss: 0.8326
Epoch 14/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7419 - loss: 0.7383 - val_accuracy: 0.7014 - val_loss: 0.8549
Epoch 15/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7431 - loss: 0.7344 - val_accuracy: 0.7570 - val_loss: 0.7717
Epoch 16/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7501 - loss: 0.7152 - val_accuracy: 0.7418 - val_loss: 0.7793
Epoch 17/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7477 - loss: 0.7192 - val_accuracy: 0.7406 - val_loss: 0.7821
Epoch 18/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7528 - loss: 0.7021 - val_accuracy: 0.7452 - val_loss: 0.7979
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.7400 - loss: 0.8184
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.82      0.76      0.79      1000
           1       0.90      0.85      0.87      1000
           2       0.78      0.51      0.62      1000
           3       0.54      0.56      0.55      1000
           4       0.62      0.80      0.70      1000
           5       0.59      0.73      0.65      1000
           6       0.68      0.90      0.77      1000
           7       0.89      0.67      0.76      1000
           8       0.84      0.86      0.85      1000
           9       0.92      0.74      0.82      1000

    accuracy                           0.74     10000
   macro avg       0.76      0.74      0.74     10000
weighted avg       0.76      0.74      0.74     10000

Accuracy Score: 0.7374
Root Mean Square Error: 1.9812874602136865
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 15s 13ms/step - accuracy: 0.3192 - loss: 1.8370 - val_accuracy: 0.5308 - val_loss: 1.5182
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 12s 7ms/step - accuracy: 0.5389 - loss: 1.2969 - val_accuracy: 0.6130 - val_loss: 1.2770
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.5985 - loss: 1.1394 - val_accuracy: 0.6638 - val_loss: 1.1336
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6374 - loss: 1.0343 - val_accuracy: 0.6566 - val_loss: 1.0617
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6630 - loss: 0.9634 - val_accuracy: 0.6966 - val_loss: 1.0592
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6766 - loss: 0.9257 - val_accuracy: 0.6966 - val_loss: 0.9798
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7002 - loss: 0.8713 - val_accuracy: 0.7178 - val_loss: 0.9349
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7081 - loss: 0.8385 - val_accuracy: 0.7176 - val_loss: 0.9122
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7174 - loss: 0.8103 - val_accuracy: 0.7254 - val_loss: 0.8543
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7250 - loss: 0.7891 - val_accuracy: 0.7116 - val_loss: 0.8703
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7344 - loss: 0.7610 - val_accuracy: 0.7294 - val_loss: 0.8469
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7389 - loss: 0.7475 - val_accuracy: 0.7398 - val_loss: 0.8138
Epoch 13/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7419 - loss: 0.7351 - val_accuracy: 0.7440 - val_loss: 0.8344
Epoch 14/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7467 - loss: 0.7249 - val_accuracy: 0.7512 - val_loss: 0.7952
Epoch 15/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7482 - loss: 0.7192 - val_accuracy: 0.7292 - val_loss: 0.8051
Epoch 16/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7482 - loss: 0.7121 - val_accuracy: 0.7332 - val_loss: 0.8102
Epoch 17/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7547 - loss: 0.6888 - val_accuracy: 0.7340 - val_loss: 0.7854
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7316 - loss: 0.8009
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.84      0.74      0.79      1000
           1       0.91      0.85      0.88      1000
           2       0.77      0.50      0.61      1000
           3       0.57      0.57      0.57      1000
           4       0.52      0.86      0.65      1000
           5       0.75      0.57      0.65      1000
           6       0.64      0.91      0.75      1000
           7       0.87      0.67      0.75      1000
           8       0.81      0.87      0.84      1000
           9       0.91      0.77      0.83      1000

    accuracy                           0.73     10000
   macro avg       0.76      0.73      0.73     10000
weighted avg       0.76      0.73      0.73     10000

Accuracy Score: 0.7302
Root Mean Square Error: 1.9787369708983558
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
In [13]:
patience_data_4 = {}
for i in range(10):
  # Build Model 2-4: CNN and MaxPool with dropouts between and after
  name = f'CNN_DO_MP_DO_X3_64_128_256-Pat-4'
  k.clear_session()
  model = models.Sequential()
  model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Flatten())
  model.add(layers.Dense(units=10, activation=tf.nn.softmax))
  keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
  model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
  time_start = time.time()
  history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
                      ,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=4)])
  time_end = time.time()
  preds = model.predict(image_test_norm)
  test_pred = model.evaluate(image_test_norm, test_labels)

  history_dict = history.history
  history_df=pd.DataFrame(history_dict)
  plt.subplots(figsize=(16,12))
  plt.tight_layout()
  display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
  display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
  pred= model.predict(image_test_norm)
  pred=np.argmax(pred, axis=1)
  print_validation_report(test_labels, pred)
  plot_confusion_matrix(test_labels, pred)
  add_to_data(patience_data_4, name, history, test_pred)
Output hidden; open in https://colab.research.google.com to view.
In [14]:
patience_data_5 = {}
for i in range(10):
  # Build Model 2-4: CNN and MaxPool with dropouts between and after
  name = f'CNN_DO_MP_DO_X3_64_128_256-Pat-5'
  k.clear_session()
  model = models.Sequential()
  model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Flatten())
  model.add(layers.Dense(units=10, activation=tf.nn.softmax))
  keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
  model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
  time_start = time.time()
  history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
                      ,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=5)])
  time_end = time.time()
  preds = model.predict(image_test_norm)
  test_pred = model.evaluate(image_test_norm, test_labels)

  history_dict = history.history
  history_df=pd.DataFrame(history_dict)
  plt.subplots(figsize=(16,12))
  plt.tight_layout()
  display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
  display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
  pred= model.predict(image_test_norm)
  pred=np.argmax(pred, axis=1)
  print_validation_report(test_labels, pred)
  plot_confusion_matrix(test_labels, pred)
  add_to_data(patience_data_5, name, history, test_pred)
Output hidden; open in https://colab.research.google.com to view.
In [15]:
patience_data_2_df = pd.DataFrame(patience_2_data)
patience_data_2_df
Out[15]:
model accuracy val_accuracy test_accuracy loss val_loss test_loss time
0 DNN 0.698 0.698 0.701 0.862 0.931 0.951 51.180
1 CNN_DO_MP_DO_X3_64_128_256-Pat-2 0.698 0.702 0.702 0.864 0.909 0.928 46.438
2 CNN_DO_MP_DO_X3_64_128_256-Pat-2 0.707 0.713 0.707 0.841 0.910 0.924 55.858
3 CNN_DO_MP_DO_X3_64_128_256-Pat-2 0.667 0.615 0.625 0.952 1.130 1.147 43.561
4 CNN_DO_MP_DO_X3_64_128_256-Pat-2 0.736 0.724 0.722 0.749 0.826 0.842 81.421
5 CNN_DO_MP_DO_X3_64_128_256-Pat-2 0.733 0.729 0.726 0.765 0.843 0.852 72.815
6 CNN_DO_MP_DO_X3_64_128_256-Pat-2 0.728 0.726 0.721 0.772 0.829 0.852 79.005
7 CNN_DO_MP_DO_X3_64_128_256-Pat-2 0.691 0.690 0.688 0.885 0.987 1.001 52.090
8 CNN_DO_MP_DO_X3_64_128_256-Pat-2 0.676 0.693 0.694 0.923 0.955 0.973 48.970
9 CNN_DO_MP_DO_X3_64_128_256-Pat-2 0.731 0.713 0.717 0.762 0.868 0.884 72.004
In [16]:
patience_data_3_df = pd.DataFrame(patience_data_3)
patience_data_3_df
Out[16]:
model accuracy val_accuracy test_accuracy loss val_loss test_loss time
0 DNN 0.752 0.730 0.727 0.710 0.797 0.816 111.683
1 CNN_DO_MP_DO_X3_64_128_256-Pat-3 0.753 0.764 0.766 0.700 0.719 0.738 111.974
2 CNN_DO_MP_DO_X3_64_128_256-Pat-3 0.749 0.747 0.743 0.718 0.781 0.795 87.114
3 CNN_DO_MP_DO_X3_64_128_256-Pat-3 0.746 0.746 0.739 0.729 0.791 0.807 81.149
4 CNN_DO_MP_DO_X3_64_128_256-Pat-3 0.725 0.700 0.699 0.787 0.890 0.910 60.889
5 CNN_DO_MP_DO_X3_64_128_256-Pat-3 0.717 0.707 0.700 0.813 0.901 0.917 66.705
6 CNN_DO_MP_DO_X3_64_128_256-Pat-3 0.757 0.751 0.745 0.691 0.761 0.782 110.457
7 CNN_DO_MP_DO_X3_64_128_256-Pat-3 0.747 0.750 0.738 0.731 0.783 0.807 81.480
8 CNN_DO_MP_DO_X3_64_128_256-Pat-3 0.753 0.745 0.737 0.706 0.798 0.822 101.172
9 CNN_DO_MP_DO_X3_64_128_256-Pat-3 0.756 0.734 0.730 0.693 0.785 0.801 101.895
In [17]:
patience_data_4_df = pd.DataFrame(patience_data_4)
patience_data_4_df
Out[17]:
model accuracy val_accuracy test_accuracy loss val_loss test_loss time
0 DNN 0.730 0.719 0.723 0.774 0.837 0.854 64.960
1 CNN_DO_MP_DO_X3_64_128_256-Pat-4 0.768 0.748 0.739 0.663 0.791 0.808 115.623
2 CNN_DO_MP_DO_X3_64_128_256-Pat-4 0.768 0.764 0.761 0.666 0.733 0.748 126.818
3 CNN_DO_MP_DO_X3_64_128_256-Pat-4 0.760 0.731 0.730 0.690 0.789 0.803 108.043
4 CNN_DO_MP_DO_X3_64_128_256-Pat-4 0.776 0.752 0.743 0.642 0.748 0.766 149.135
5 CNN_DO_MP_DO_X3_64_128_256-Pat-4 0.777 0.739 0.731 0.639 0.796 0.804 132.782
6 CNN_DO_MP_DO_X3_64_128_256-Pat-4 0.764 0.744 0.738 0.669 0.762 0.771 116.117
7 CNN_DO_MP_DO_X3_64_128_256-Pat-4 0.757 0.747 0.742 0.697 0.780 0.802 111.529
8 CNN_DO_MP_DO_X3_64_128_256-Pat-4 0.765 0.750 0.746 0.675 0.764 0.782 129.178
9 CNN_DO_MP_DO_X3_64_128_256-Pat-4 0.766 0.760 0.751 0.666 0.766 0.782 113.736
In [18]:
patience_data_5_df = pd.DataFrame(patience_data_5)
patience_data_5_df
Out[18]:
model accuracy val_accuracy test_accuracy loss val_loss test_loss time
0 DNN 0.786 0.757 0.752 0.610 0.725 0.749 189.169
1 CNN_DO_MP_DO_X3_64_128_256-Pat-5 0.761 0.734 0.742 0.677 0.778 0.783 117.719
2 CNN_DO_MP_DO_X3_64_128_256-Pat-5 0.751 0.738 0.731 0.710 0.796 0.817 95.279
3 CNN_DO_MP_DO_X3_64_128_256-Pat-5 0.781 0.758 0.754 0.618 0.709 0.725 147.939
4 CNN_DO_MP_DO_X3_64_128_256-Pat-5 0.766 0.747 0.742 0.663 0.754 0.773 114.888
5 CNN_DO_MP_DO_X3_64_128_256-Pat-5 0.780 0.752 0.742 0.629 0.733 0.746 151.310
6 CNN_DO_MP_DO_X3_64_128_256-Pat-5 0.771 0.772 0.762 0.653 0.696 0.713 131.389
7 CNN_DO_MP_DO_X3_64_128_256-Pat-5 0.767 0.756 0.751 0.661 0.721 0.738 132.738
8 CNN_DO_MP_DO_X3_64_128_256-Pat-5 0.779 0.771 0.767 0.627 0.691 0.719 159.406
9 CNN_DO_MP_DO_X3_64_128_256-Pat-5 0.766 0.759 0.754 0.671 0.739 0.752 117.148
In [19]:
patience_data_2_df.drop('model', axis=1, inplace=True)
patience_data_3_df.drop('model', axis=1, inplace=True)
patience_data_4_df.drop('model', axis=1, inplace=True)
patience_data_5_df.drop('model', axis=1, inplace=True)
In [20]:
patience_data_2_df.astype('float').describe()
Out[20]:
accuracy val_accuracy test_accuracy loss val_loss test_loss time
count 10.000000 10.000000 10.000000 10.000000 10.000000 10.000000 10.000000
mean 0.706500 0.700300 0.700300 0.837500 0.918800 0.935400 60.334200
std 0.024744 0.033019 0.029341 0.072319 0.091828 0.092012 14.378219
min 0.667000 0.615000 0.625000 0.749000 0.826000 0.842000 43.561000
25% 0.692750 0.694250 0.695750 0.766750 0.849250 0.860000 49.522500
50% 0.702500 0.707500 0.704500 0.851500 0.909500 0.926000 53.974000
75% 0.730250 0.721250 0.720000 0.879750 0.949000 0.967500 72.612250
max 0.736000 0.729000 0.726000 0.952000 1.130000 1.147000 81.421000
In [21]:
patience_data_3_df.astype('float').describe()
Out[21]:
accuracy val_accuracy test_accuracy loss val_loss test_loss time
count 10.000000 10.000000 10.000000 10.000000 10.000000 10.000000 10.000000
mean 0.745500 0.737400 0.732400 0.727800 0.800600 0.819500 91.451800
std 0.013517 0.020178 0.020266 0.040827 0.055125 0.054791 18.756013
min 0.717000 0.700000 0.699000 0.691000 0.719000 0.738000 60.889000
25% 0.746250 0.731000 0.727750 0.701500 0.781500 0.796500 81.231750
50% 0.750500 0.745500 0.737500 0.714000 0.788000 0.807000 94.143000
75% 0.753000 0.749250 0.742000 0.730500 0.797750 0.820500 108.316500
max 0.757000 0.764000 0.766000 0.813000 0.901000 0.917000 111.974000
In [22]:
patience_data_4_df.astype('float').describe()
Out[22]:
accuracy val_accuracy test_accuracy loss val_loss test_loss time
count 10.000000 10.000000 10.000000 10.000000 10.00000 10.000000 10.000000
mean 0.763100 0.745400 0.740400 0.678100 0.77660 0.792000 116.792100
std 0.013178 0.013251 0.010977 0.038211 0.02906 0.029284 22.004154
min 0.730000 0.719000 0.723000 0.639000 0.73300 0.748000 64.960000
25% 0.761000 0.740250 0.732750 0.663750 0.76250 0.773750 112.080750
50% 0.765500 0.747500 0.740500 0.667500 0.77300 0.792000 115.870000
75% 0.768000 0.751500 0.745250 0.686250 0.79050 0.803750 128.588000
max 0.777000 0.764000 0.761000 0.774000 0.83700 0.854000 149.135000
In [23]:
patience_data_5_df.astype('float').describe()
Out[23]:
accuracy val_accuracy test_accuracy loss val_loss test_loss time
count 10.000000 10.000000 10.000000 10.00000 10.00000 10.00000 10.000000
mean 0.770800 0.754400 0.749700 0.65190 0.73420 0.75150 135.698500
std 0.010727 0.012358 0.010657 0.03096 0.03395 0.03198 27.031302
min 0.751000 0.734000 0.731000 0.61000 0.69100 0.71300 95.279000
25% 0.766000 0.748250 0.742000 0.62750 0.71200 0.72825 117.290750
50% 0.769000 0.756500 0.751500 0.65700 0.72900 0.74750 132.063500
75% 0.779750 0.758750 0.754000 0.66900 0.75025 0.76775 150.467250
max 0.786000 0.772000 0.767000 0.71000 0.79600 0.81700 189.169000
In [24]:
patience_data_6 = {}
for i in range(10):
  # Build Model 2-4: CNN and MaxPool with dropouts between and after
  name = f'CNN_DO_MP_DO_X3_64_128_256-Pat-6'
  k.clear_session()
  model = models.Sequential()
  model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Flatten())
  model.add(layers.Dense(units=10, activation=tf.nn.softmax))
  keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
  model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
  time_start = time.time()
  history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
                      ,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=6)])
  time_end = time.time()
  preds = model.predict(image_test_norm)
  test_pred = model.evaluate(image_test_norm, test_labels)

  history_dict = history.history
  history_df=pd.DataFrame(history_dict)
  plt.subplots(figsize=(16,12))
  plt.tight_layout()
  display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
  display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
  pred= model.predict(image_test_norm)
  pred=np.argmax(pred, axis=1)
  print_validation_report(test_labels, pred)
  plot_confusion_matrix(test_labels, pred)
  add_to_data(patience_data_6, name, history, test_pred)
Output hidden; open in https://colab.research.google.com to view.
In [11]:
patience_data_8 = {}
for i in range(10):
  # Build Model 2-4: CNN and MaxPool with dropouts between and after
  name = f'CNN_DO_MP_DO_X3_64_128_256-Pat-8'
  k.clear_session()
  model = models.Sequential()
  model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Flatten())
  model.add(layers.Dense(units=10, activation=tf.nn.softmax))
  keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
  model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
  time_start = time.time()
  history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
                      ,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=8)])
  time_end = time.time()
  preds = model.predict(image_test_norm)
  test_pred = model.evaluate(image_test_norm, test_labels)

  history_dict = history.history
  history_df=pd.DataFrame(history_dict)
  plt.subplots(figsize=(16,12))
  plt.tight_layout()
  display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
  display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
  pred= model.predict(image_test_norm)
  pred=np.argmax(pred, axis=1)
  print_validation_report(test_labels, pred)
  plot_confusion_matrix(test_labels, pred)
  add_to_data(patience_data_8, name, history, test_pred)
Output hidden; open in https://colab.research.google.com to view.
In [ ]:
patience_data_10 = {}
for i in range(10):
  # Build Model 2-4: CNN and MaxPool with dropouts between and after
  name = f'CNN_DO_MP_DO_X3_64_128_256-Pat-10'
  k.clear_session()
  model = models.Sequential()
  model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), input_shape=((32,32,3,)), activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1),activation=tf.nn.relu))
  model.add(layers.Dropout(0.25))
  model.add(layers.MaxPool2D((2, 2),strides=2))
  model.add(layers.Dropout(0.25))
  model.add(layers.Flatten())
  model.add(layers.Dense(units=10, activation=tf.nn.softmax))
  keras.utils.plot_model(model, f"CIFAR10_{name}.png", show_shapes=True)
  model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
  time_start = time.time()
  history = model.fit(image_train_norm, label_train_split, epochs=200, batch_size=64, validation_data=(image_val_norm, label_val_split), callbacks=[tf.keras.callbacks.ModelCheckpoint(f"{name}_model.keras",save_best_only=True,save_weights_only=False)
                      ,tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=10)])
  time_end = time.time()
  preds = model.predict(image_test_norm)
  test_pred = model.evaluate(image_test_norm, test_labels)

  history_dict = history.history
  history_df=pd.DataFrame(history_dict)
  plt.subplots(figsize=(16,12))
  plt.tight_layout()
  display_training_curves(history_df['accuracy'], history_df['val_accuracy'], 'accuracy', 211)
  display_training_curves(history_df['loss'], history_df['val_loss'], 'loss', 212)
  pred= model.predict(image_test_norm)
  pred=np.argmax(pred, axis=1)
  print_validation_report(test_labels, pred)
  plot_confusion_matrix(test_labels, pred)
  add_to_data(patience_data_10, name, history, test_pred)
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 16s 14ms/step - accuracy: 0.3274 - loss: 1.8268 - val_accuracy: 0.5364 - val_loss: 1.4608
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5431 - loss: 1.2872 - val_accuracy: 0.6118 - val_loss: 1.2867
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.6078 - loss: 1.1183 - val_accuracy: 0.6582 - val_loss: 1.1551
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6466 - loss: 1.0189 - val_accuracy: 0.6290 - val_loss: 1.0941
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6639 - loss: 0.9578 - val_accuracy: 0.6818 - val_loss: 1.0511
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.6845 - loss: 0.9063 - val_accuracy: 0.7186 - val_loss: 0.9677
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.6997 - loss: 0.8643 - val_accuracy: 0.7348 - val_loss: 0.9255
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7073 - loss: 0.8388 - val_accuracy: 0.7126 - val_loss: 0.9224
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7173 - loss: 0.8052 - val_accuracy: 0.6888 - val_loss: 0.9279
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7291 - loss: 0.7815 - val_accuracy: 0.7554 - val_loss: 0.8379
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7296 - loss: 0.7841 - val_accuracy: 0.7302 - val_loss: 0.8549
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7393 - loss: 0.7492 - val_accuracy: 0.7528 - val_loss: 0.7944
Epoch 13/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7436 - loss: 0.7352 - val_accuracy: 0.7394 - val_loss: 0.8120
Epoch 14/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7463 - loss: 0.7257 - val_accuracy: 0.7476 - val_loss: 0.7714
Epoch 15/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7480 - loss: 0.7205 - val_accuracy: 0.7510 - val_loss: 0.7849
Epoch 16/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7549 - loss: 0.6962 - val_accuracy: 0.7384 - val_loss: 0.7997
Epoch 17/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7599 - loss: 0.6858 - val_accuracy: 0.7564 - val_loss: 0.7463
Epoch 18/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7603 - loss: 0.6835 - val_accuracy: 0.7632 - val_loss: 0.7682
Epoch 19/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7687 - loss: 0.6659 - val_accuracy: 0.7654 - val_loss: 0.7448
Epoch 20/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7653 - loss: 0.6730 - val_accuracy: 0.7672 - val_loss: 0.7437
Epoch 21/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7729 - loss: 0.6479 - val_accuracy: 0.7572 - val_loss: 0.7479
Epoch 22/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7721 - loss: 0.6457 - val_accuracy: 0.7774 - val_loss: 0.7015
Epoch 23/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7757 - loss: 0.6425 - val_accuracy: 0.7620 - val_loss: 0.7048
Epoch 24/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7768 - loss: 0.6389 - val_accuracy: 0.7602 - val_loss: 0.7347
Epoch 25/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7725 - loss: 0.6425 - val_accuracy: 0.7608 - val_loss: 0.7323
Epoch 26/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7843 - loss: 0.6229 - val_accuracy: 0.7672 - val_loss: 0.6976
Epoch 27/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7852 - loss: 0.6137 - val_accuracy: 0.7420 - val_loss: 0.7566
Epoch 28/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7822 - loss: 0.6260 - val_accuracy: 0.7726 - val_loss: 0.6956
Epoch 29/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 6ms/step - accuracy: 0.7830 - loss: 0.6227 - val_accuracy: 0.7694 - val_loss: 0.7028
Epoch 30/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7862 - loss: 0.6047 - val_accuracy: 0.7840 - val_loss: 0.6715
Epoch 31/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7918 - loss: 0.5962 - val_accuracy: 0.7646 - val_loss: 0.6997
Epoch 32/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7883 - loss: 0.6049 - val_accuracy: 0.7752 - val_loss: 0.6897
Epoch 33/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7896 - loss: 0.6009 - val_accuracy: 0.7636 - val_loss: 0.7109
Epoch 34/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 4s 6ms/step - accuracy: 0.7909 - loss: 0.5995 - val_accuracy: 0.7552 - val_loss: 0.7216
Epoch 35/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7906 - loss: 0.5893 - val_accuracy: 0.7630 - val_loss: 0.7005
Epoch 36/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.8003 - loss: 0.5742 - val_accuracy: 0.7432 - val_loss: 0.7398
Epoch 37/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7952 - loss: 0.5911 - val_accuracy: 0.7740 - val_loss: 0.6923
Epoch 38/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7905 - loss: 0.5862 - val_accuracy: 0.7562 - val_loss: 0.7355
Epoch 39/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7966 - loss: 0.5776 - val_accuracy: 0.7692 - val_loss: 0.6925
Epoch 40/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.8040 - loss: 0.5640 - val_accuracy: 0.7806 - val_loss: 0.6670
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - accuracy: 0.7733 - loss: 0.6816
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.80      0.82      0.81      1000
           1       0.92      0.89      0.90      1000
           2       0.82      0.57      0.67      1000
           3       0.65      0.56      0.60      1000
           4       0.64      0.83      0.72      1000
           5       0.72      0.68      0.70      1000
           6       0.71      0.90      0.80      1000
           7       0.90      0.73      0.81      1000
           8       0.80      0.92      0.86      1000
           9       0.88      0.85      0.86      1000

    accuracy                           0.78     10000
   macro avg       0.78      0.78      0.77     10000
weighted avg       0.78      0.78      0.77     10000

Accuracy Score: 0.7759
Root Mean Square Error: 1.8929606440705522
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 18s 15ms/step - accuracy: 0.3311 - loss: 1.8128 - val_accuracy: 0.5420 - val_loss: 1.4862
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 7ms/step - accuracy: 0.5457 - loss: 1.2736 - val_accuracy: 0.5920 - val_loss: 1.2587
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.6044 - loss: 1.1308 - val_accuracy: 0.6380 - val_loss: 1.1435
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.6401 - loss: 1.0277 - val_accuracy: 0.6714 - val_loss: 1.0636
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6683 - loss: 0.9479 - val_accuracy: 0.6740 - val_loss: 1.0209
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6845 - loss: 0.9045 - val_accuracy: 0.6980 - val_loss: 0.9292
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7028 - loss: 0.8501 - val_accuracy: 0.7010 - val_loss: 0.9062
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7131 - loss: 0.8273 - val_accuracy: 0.6972 - val_loss: 0.9268
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.7132 - loss: 0.8130 - val_accuracy: 0.6988 - val_loss: 0.9554
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.7306 - loss: 0.7696 - val_accuracy: 0.6868 - val_loss: 0.9243
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7270 - loss: 0.7684 - val_accuracy: 0.7122 - val_loss: 0.8773
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7436 - loss: 0.7394 - val_accuracy: 0.7242 - val_loss: 0.8447
Epoch 13/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7429 - loss: 0.7337 - val_accuracy: 0.7536 - val_loss: 0.7815
Epoch 14/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7471 - loss: 0.7219 - val_accuracy: 0.7412 - val_loss: 0.7831
Epoch 15/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7455 - loss: 0.7215 - val_accuracy: 0.7538 - val_loss: 0.7652
Epoch 16/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7567 - loss: 0.7069 - val_accuracy: 0.7452 - val_loss: 0.7886
Epoch 17/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7601 - loss: 0.6773 - val_accuracy: 0.7660 - val_loss: 0.7611
Epoch 18/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7568 - loss: 0.6929 - val_accuracy: 0.7686 - val_loss: 0.7302
Epoch 19/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7600 - loss: 0.6825 - val_accuracy: 0.7342 - val_loss: 0.7778
Epoch 20/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7654 - loss: 0.6730 - val_accuracy: 0.7198 - val_loss: 0.8000
Epoch 21/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.7674 - loss: 0.6630 - val_accuracy: 0.7594 - val_loss: 0.7387
Epoch 22/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.7747 - loss: 0.6450 - val_accuracy: 0.7662 - val_loss: 0.7142
Epoch 23/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7740 - loss: 0.6481 - val_accuracy: 0.7718 - val_loss: 0.6964
Epoch 24/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7698 - loss: 0.6484 - val_accuracy: 0.7688 - val_loss: 0.7170
Epoch 25/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.7786 - loss: 0.6336 - val_accuracy: 0.7740 - val_loss: 0.6777
Epoch 26/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7797 - loss: 0.6237 - val_accuracy: 0.7698 - val_loss: 0.7150
Epoch 27/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7762 - loss: 0.6314 - val_accuracy: 0.7622 - val_loss: 0.6998
Epoch 28/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7840 - loss: 0.6173 - val_accuracy: 0.7780 - val_loss: 0.6731
Epoch 29/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7848 - loss: 0.6173 - val_accuracy: 0.7550 - val_loss: 0.7208
Epoch 30/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 0.7836 - loss: 0.6116 - val_accuracy: 0.7742 - val_loss: 0.6743
Epoch 31/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7894 - loss: 0.6019 - val_accuracy: 0.7620 - val_loss: 0.7069
Epoch 32/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7866 - loss: 0.6003 - val_accuracy: 0.7754 - val_loss: 0.6699
Epoch 33/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7957 - loss: 0.5857 - val_accuracy: 0.7614 - val_loss: 0.6972
Epoch 34/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 8ms/step - accuracy: 0.7912 - loss: 0.5885 - val_accuracy: 0.7438 - val_loss: 0.7399
Epoch 35/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7936 - loss: 0.5927 - val_accuracy: 0.7638 - val_loss: 0.6975
Epoch 36/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7976 - loss: 0.5729 - val_accuracy: 0.7330 - val_loss: 0.7758
Epoch 37/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7944 - loss: 0.5823 - val_accuracy: 0.7680 - val_loss: 0.6805
Epoch 38/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7980 - loss: 0.5693 - val_accuracy: 0.7324 - val_loss: 0.7663
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.7345 - loss: 0.7864
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.86      0.76      0.81      1000
           1       0.94      0.81      0.87      1000
           2       0.79      0.55      0.65      1000
           3       0.56      0.62      0.59      1000
           4       0.67      0.71      0.69      1000
           5       0.78      0.54      0.64      1000
           6       0.49      0.97      0.65      1000
           7       0.91      0.70      0.79      1000
           8       0.82      0.88      0.85      1000
           9       0.91      0.78      0.84      1000

    accuracy                           0.73     10000
   macro avg       0.77      0.73      0.74     10000
weighted avg       0.77      0.73      0.74     10000

Accuracy Score: 0.7321
Root Mean Square Error: 1.9651463049859672
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 17s 15ms/step - accuracy: 0.3283 - loss: 1.8114 - val_accuracy: 0.5330 - val_loss: 1.4984
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 0.5464 - loss: 1.2688 - val_accuracy: 0.6030 - val_loss: 1.2624
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6139 - loss: 1.1017 - val_accuracy: 0.6542 - val_loss: 1.1572
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6437 - loss: 1.0225 - val_accuracy: 0.6448 - val_loss: 1.1257
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6677 - loss: 0.9488 - val_accuracy: 0.6900 - val_loss: 0.9883
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6834 - loss: 0.9119 - val_accuracy: 0.7050 - val_loss: 0.9464
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7013 - loss: 0.8566 - val_accuracy: 0.6940 - val_loss: 0.9470
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7046 - loss: 0.8379 - val_accuracy: 0.7150 - val_loss: 0.9096
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7147 - loss: 0.8206 - val_accuracy: 0.7014 - val_loss: 0.9196
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7254 - loss: 0.7880 - val_accuracy: 0.7262 - val_loss: 0.8937
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7366 - loss: 0.7633 - val_accuracy: 0.7332 - val_loss: 0.8380
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7420 - loss: 0.7363 - val_accuracy: 0.7178 - val_loss: 0.8402
Epoch 13/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 8ms/step - accuracy: 0.7411 - loss: 0.7409 - val_accuracy: 0.7198 - val_loss: 0.8501
Epoch 14/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7389 - loss: 0.7378 - val_accuracy: 0.7552 - val_loss: 0.7661
Epoch 15/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7538 - loss: 0.7055 - val_accuracy: 0.7322 - val_loss: 0.8034
Epoch 16/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7575 - loss: 0.6948 - val_accuracy: 0.7608 - val_loss: 0.7550
Epoch 17/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.7593 - loss: 0.6895 - val_accuracy: 0.7308 - val_loss: 0.7872
Epoch 18/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7600 - loss: 0.6850 - val_accuracy: 0.7510 - val_loss: 0.7654
Epoch 19/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7613 - loss: 0.6700 - val_accuracy: 0.7500 - val_loss: 0.7489
Epoch 20/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7730 - loss: 0.6558 - val_accuracy: 0.7434 - val_loss: 0.7607
Epoch 21/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7737 - loss: 0.6534 - val_accuracy: 0.7406 - val_loss: 0.7604
Epoch 22/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7729 - loss: 0.6476 - val_accuracy: 0.7516 - val_loss: 0.7402
Epoch 23/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7742 - loss: 0.6458 - val_accuracy: 0.7518 - val_loss: 0.7412
Epoch 24/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7758 - loss: 0.6404 - val_accuracy: 0.7554 - val_loss: 0.7437
Epoch 25/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.7752 - loss: 0.6387 - val_accuracy: 0.7616 - val_loss: 0.7021
Epoch 26/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7811 - loss: 0.6277 - val_accuracy: 0.7696 - val_loss: 0.7085
Epoch 27/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7847 - loss: 0.6158 - val_accuracy: 0.7428 - val_loss: 0.7432
Epoch 28/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7830 - loss: 0.6147 - val_accuracy: 0.7712 - val_loss: 0.7080
Epoch 29/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7888 - loss: 0.6040 - val_accuracy: 0.7654 - val_loss: 0.6850
Epoch 30/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.7921 - loss: 0.5978 - val_accuracy: 0.7722 - val_loss: 0.6937
Epoch 31/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.7875 - loss: 0.6052 - val_accuracy: 0.7796 - val_loss: 0.6740
Epoch 32/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7906 - loss: 0.5966 - val_accuracy: 0.7702 - val_loss: 0.6901
Epoch 33/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7926 - loss: 0.5927 - val_accuracy: 0.7854 - val_loss: 0.6553
Epoch 34/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7923 - loss: 0.5920 - val_accuracy: 0.7852 - val_loss: 0.6552
Epoch 35/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7957 - loss: 0.5826 - val_accuracy: 0.7778 - val_loss: 0.6896
Epoch 36/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 6ms/step - accuracy: 0.7923 - loss: 0.5907 - val_accuracy: 0.7516 - val_loss: 0.7098
Epoch 37/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7986 - loss: 0.5746 - val_accuracy: 0.7440 - val_loss: 0.7403
Epoch 38/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7972 - loss: 0.5810 - val_accuracy: 0.7750 - val_loss: 0.6808
Epoch 39/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7980 - loss: 0.5745 - val_accuracy: 0.7392 - val_loss: 0.7368
Epoch 40/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.8020 - loss: 0.5750 - val_accuracy: 0.7836 - val_loss: 0.6523
Epoch 41/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7973 - loss: 0.5752 - val_accuracy: 0.7558 - val_loss: 0.6883
Epoch 42/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 8ms/step - accuracy: 0.7913 - loss: 0.5854 - val_accuracy: 0.7568 - val_loss: 0.7148
Epoch 43/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.8049 - loss: 0.5611 - val_accuracy: 0.7590 - val_loss: 0.6827
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.7682 - loss: 0.6896
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.84      0.78      0.81      1000
           1       0.95      0.86      0.90      1000
           2       0.83      0.51      0.63      1000
           3       0.65      0.59      0.62      1000
           4       0.56      0.89      0.69      1000
           5       0.72      0.66      0.69      1000
           6       0.72      0.89      0.80      1000
           7       0.90      0.74      0.81      1000
           8       0.82      0.91      0.86      1000
           9       0.89      0.84      0.86      1000

    accuracy                           0.77     10000
   macro avg       0.79      0.77      0.77     10000
weighted avg       0.79      0.77      0.77     10000

Accuracy Score: 0.7668
Root Mean Square Error: 1.8611555550248884
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 18s 15ms/step - accuracy: 0.3418 - loss: 1.7890 - val_accuracy: 0.5416 - val_loss: 1.4797
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 0.5457 - loss: 1.2806 - val_accuracy: 0.6228 - val_loss: 1.2868
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6099 - loss: 1.1213 - val_accuracy: 0.6316 - val_loss: 1.1571
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6394 - loss: 1.0292 - val_accuracy: 0.6346 - val_loss: 1.1407
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6698 - loss: 0.9575 - val_accuracy: 0.6778 - val_loss: 1.0521
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6815 - loss: 0.9073 - val_accuracy: 0.7066 - val_loss: 0.9567
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6944 - loss: 0.8708 - val_accuracy: 0.6790 - val_loss: 0.9855
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 0.7028 - loss: 0.8417 - val_accuracy: 0.6804 - val_loss: 0.9697
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 0.7193 - loss: 0.8050 - val_accuracy: 0.7254 - val_loss: 0.8657
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7209 - loss: 0.7947 - val_accuracy: 0.7184 - val_loss: 0.8833
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7309 - loss: 0.7691 - val_accuracy: 0.7326 - val_loss: 0.8312
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7367 - loss: 0.7507 - val_accuracy: 0.7032 - val_loss: 0.8734
Epoch 13/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7446 - loss: 0.7305 - val_accuracy: 0.7206 - val_loss: 0.8247
Epoch 14/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.7480 - loss: 0.7277 - val_accuracy: 0.7338 - val_loss: 0.8583
Epoch 15/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.7549 - loss: 0.7000 - val_accuracy: 0.7582 - val_loss: 0.7537
Epoch 16/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 0.7584 - loss: 0.6859 - val_accuracy: 0.7676 - val_loss: 0.7514
Epoch 17/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 8ms/step - accuracy: 0.7603 - loss: 0.6912 - val_accuracy: 0.7550 - val_loss: 0.7584
Epoch 18/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.7657 - loss: 0.6757 - val_accuracy: 0.7510 - val_loss: 0.7771
Epoch 19/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.7691 - loss: 0.6675 - val_accuracy: 0.7314 - val_loss: 0.7888
Epoch 20/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7631 - loss: 0.6723 - val_accuracy: 0.7578 - val_loss: 0.7528
Epoch 21/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 0.7695 - loss: 0.6528 - val_accuracy: 0.7552 - val_loss: 0.7645
Epoch 22/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7696 - loss: 0.6549 - val_accuracy: 0.7652 - val_loss: 0.7270
Epoch 23/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7753 - loss: 0.6406 - val_accuracy: 0.7574 - val_loss: 0.7246
Epoch 24/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7818 - loss: 0.6176 - val_accuracy: 0.7598 - val_loss: 0.7278
Epoch 25/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 8ms/step - accuracy: 0.7815 - loss: 0.6189 - val_accuracy: 0.7768 - val_loss: 0.6852
Epoch 26/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7798 - loss: 0.6254 - val_accuracy: 0.7642 - val_loss: 0.7205
Epoch 27/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7833 - loss: 0.6149 - val_accuracy: 0.7380 - val_loss: 0.7591
Epoch 28/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7880 - loss: 0.6076 - val_accuracy: 0.7478 - val_loss: 0.7611
Epoch 29/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7842 - loss: 0.6099 - val_accuracy: 0.7650 - val_loss: 0.7227
Epoch 30/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7774 - loss: 0.6298 - val_accuracy: 0.7578 - val_loss: 0.7142
Epoch 31/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7949 - loss: 0.5866 - val_accuracy: 0.7806 - val_loss: 0.6863
Epoch 32/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7902 - loss: 0.5993 - val_accuracy: 0.7800 - val_loss: 0.6939
Epoch 33/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7908 - loss: 0.5866 - val_accuracy: 0.7700 - val_loss: 0.6826
Epoch 34/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.7943 - loss: 0.5861 - val_accuracy: 0.7750 - val_loss: 0.6847
Epoch 35/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7973 - loss: 0.5762 - val_accuracy: 0.7744 - val_loss: 0.6760
Epoch 36/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7979 - loss: 0.5799 - val_accuracy: 0.7578 - val_loss: 0.7220
Epoch 37/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 0.7960 - loss: 0.5821 - val_accuracy: 0.7530 - val_loss: 0.7372
Epoch 38/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.8008 - loss: 0.5697 - val_accuracy: 0.7464 - val_loss: 0.7368
Epoch 39/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8008 - loss: 0.5721 - val_accuracy: 0.7764 - val_loss: 0.6738
Epoch 40/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 8ms/step - accuracy: 0.8026 - loss: 0.5620 - val_accuracy: 0.7742 - val_loss: 0.6656
Epoch 41/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8071 - loss: 0.5536 - val_accuracy: 0.7820 - val_loss: 0.6647
Epoch 42/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8052 - loss: 0.5536 - val_accuracy: 0.7766 - val_loss: 0.6564
Epoch 43/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8049 - loss: 0.5552 - val_accuracy: 0.7754 - val_loss: 0.6750
Epoch 44/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 8ms/step - accuracy: 0.8061 - loss: 0.5514 - val_accuracy: 0.7724 - val_loss: 0.6841
Epoch 45/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8060 - loss: 0.5539 - val_accuracy: 0.7924 - val_loss: 0.6464
Epoch 46/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8072 - loss: 0.5445 - val_accuracy: 0.7806 - val_loss: 0.6857
Epoch 47/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8042 - loss: 0.5562 - val_accuracy: 0.7530 - val_loss: 0.7228
Epoch 48/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 8ms/step - accuracy: 0.8074 - loss: 0.5491 - val_accuracy: 0.7764 - val_loss: 0.6759
Epoch 49/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.8107 - loss: 0.5404 - val_accuracy: 0.7706 - val_loss: 0.6729
Epoch 50/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8114 - loss: 0.5387 - val_accuracy: 0.7810 - val_loss: 0.6543
Epoch 51/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8126 - loss: 0.5326 - val_accuracy: 0.7818 - val_loss: 0.6486
Epoch 52/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 0.8129 - loss: 0.5376 - val_accuracy: 0.7794 - val_loss: 0.6531
Epoch 53/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8143 - loss: 0.5280 - val_accuracy: 0.7764 - val_loss: 0.6493
Epoch 54/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8149 - loss: 0.5236 - val_accuracy: 0.7678 - val_loss: 0.6832
Epoch 55/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8102 - loss: 0.5395 - val_accuracy: 0.7586 - val_loss: 0.7061
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.7505 - loss: 0.7196
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.86      0.77      0.81      1000
           1       0.95      0.81      0.88      1000
           2       0.78      0.57      0.66      1000
           3       0.63      0.55      0.58      1000
           4       0.57      0.87      0.69      1000
           5       0.73      0.63      0.68      1000
           6       0.61      0.93      0.73      1000
           7       0.93      0.65      0.77      1000
           8       0.84      0.88      0.86      1000
           9       0.86      0.83      0.84      1000

    accuracy                           0.75     10000
   macro avg       0.77      0.75      0.75     10000
weighted avg       0.77      0.75      0.75     10000

Accuracy Score: 0.7494
Root Mean Square Error: 1.911203809121361
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 18s 16ms/step - accuracy: 0.3329 - loss: 1.8039 - val_accuracy: 0.5530 - val_loss: 1.4348
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.5464 - loss: 1.2724 - val_accuracy: 0.6218 - val_loss: 1.2568
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.6110 - loss: 1.1186 - val_accuracy: 0.6268 - val_loss: 1.1620
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6375 - loss: 1.0333 - val_accuracy: 0.6830 - val_loss: 1.0632
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6655 - loss: 0.9669 - val_accuracy: 0.7004 - val_loss: 1.0023
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6855 - loss: 0.9065 - val_accuracy: 0.6942 - val_loss: 0.9550
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6985 - loss: 0.8600 - val_accuracy: 0.7078 - val_loss: 0.9538
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7065 - loss: 0.8461 - val_accuracy: 0.6926 - val_loss: 0.9342
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7194 - loss: 0.8101 - val_accuracy: 0.7248 - val_loss: 0.8706
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7252 - loss: 0.7926 - val_accuracy: 0.7270 - val_loss: 0.8358
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 0.7260 - loss: 0.7822 - val_accuracy: 0.7326 - val_loss: 0.8375
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.7384 - loss: 0.7446 - val_accuracy: 0.7462 - val_loss: 0.8114
Epoch 13/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7407 - loss: 0.7417 - val_accuracy: 0.7436 - val_loss: 0.7848
Epoch 14/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7454 - loss: 0.7242 - val_accuracy: 0.7578 - val_loss: 0.7538
Epoch 15/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7448 - loss: 0.7229 - val_accuracy: 0.7412 - val_loss: 0.8000
Epoch 16/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.7564 - loss: 0.6994 - val_accuracy: 0.7574 - val_loss: 0.7406
Epoch 17/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7559 - loss: 0.6960 - val_accuracy: 0.7584 - val_loss: 0.7498
Epoch 18/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7616 - loss: 0.6807 - val_accuracy: 0.7532 - val_loss: 0.7444
Epoch 19/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7629 - loss: 0.6713 - val_accuracy: 0.7298 - val_loss: 0.7906
Epoch 20/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7662 - loss: 0.6756 - val_accuracy: 0.7578 - val_loss: 0.7546
Epoch 21/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7712 - loss: 0.6501 - val_accuracy: 0.7694 - val_loss: 0.7377
Epoch 22/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7718 - loss: 0.6530 - val_accuracy: 0.7882 - val_loss: 0.6762
Epoch 23/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7718 - loss: 0.6461 - val_accuracy: 0.7594 - val_loss: 0.7409
Epoch 24/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7779 - loss: 0.6372 - val_accuracy: 0.7698 - val_loss: 0.6969
Epoch 25/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7739 - loss: 0.6400 - val_accuracy: 0.7570 - val_loss: 0.7226
Epoch 26/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7772 - loss: 0.6341 - val_accuracy: 0.7568 - val_loss: 0.7247
Epoch 27/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7818 - loss: 0.6175 - val_accuracy: 0.7558 - val_loss: 0.7357
Epoch 28/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7815 - loss: 0.6213 - val_accuracy: 0.7684 - val_loss: 0.6885
Epoch 29/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7829 - loss: 0.6158 - val_accuracy: 0.7334 - val_loss: 0.7667
Epoch 30/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7872 - loss: 0.6104 - val_accuracy: 0.7592 - val_loss: 0.6967
Epoch 31/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7886 - loss: 0.5967 - val_accuracy: 0.7782 - val_loss: 0.6784
Epoch 32/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7866 - loss: 0.6087 - val_accuracy: 0.7786 - val_loss: 0.6600
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.7771 - loss: 0.6791
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.83      0.79      0.81      1000
           1       0.93      0.84      0.88      1000
           2       0.76      0.61      0.68      1000
           3       0.60      0.62      0.61      1000
           4       0.63      0.87      0.73      1000
           5       0.79      0.61      0.69      1000
           6       0.74      0.89      0.81      1000
           7       0.87      0.75      0.80      1000
           8       0.81      0.91      0.86      1000
           9       0.88      0.84      0.86      1000

    accuracy                           0.77     10000
   macro avg       0.78      0.77      0.77     10000
weighted avg       0.78      0.77      0.77     10000

Accuracy Score: 0.774
Root Mean Square Error: 1.9160375779195982
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 18s 15ms/step - accuracy: 0.3345 - loss: 1.8036 - val_accuracy: 0.5734 - val_loss: 1.4810
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 9s 7ms/step - accuracy: 0.5527 - loss: 1.2689 - val_accuracy: 0.5814 - val_loss: 1.2667
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6122 - loss: 1.1087 - val_accuracy: 0.6194 - val_loss: 1.1480
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6464 - loss: 1.0218 - val_accuracy: 0.6698 - val_loss: 1.0476
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6681 - loss: 0.9507 - val_accuracy: 0.6958 - val_loss: 0.9938
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6842 - loss: 0.9077 - val_accuracy: 0.6708 - val_loss: 0.9821
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6981 - loss: 0.8692 - val_accuracy: 0.7206 - val_loss: 0.8885
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7091 - loss: 0.8341 - val_accuracy: 0.7180 - val_loss: 0.9032
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7198 - loss: 0.8022 - val_accuracy: 0.7038 - val_loss: 0.9094
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7229 - loss: 0.7893 - val_accuracy: 0.7432 - val_loss: 0.8509
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7365 - loss: 0.7622 - val_accuracy: 0.7540 - val_loss: 0.8002
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7406 - loss: 0.7372 - val_accuracy: 0.7474 - val_loss: 0.8208
Epoch 13/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7417 - loss: 0.7358 - val_accuracy: 0.7272 - val_loss: 0.8191
Epoch 14/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7431 - loss: 0.7265 - val_accuracy: 0.7514 - val_loss: 0.7762
Epoch 15/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7592 - loss: 0.6889 - val_accuracy: 0.7512 - val_loss: 0.7669
Epoch 16/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.7566 - loss: 0.6957 - val_accuracy: 0.7618 - val_loss: 0.7546
Epoch 17/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7615 - loss: 0.6779 - val_accuracy: 0.7610 - val_loss: 0.7476
Epoch 18/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7633 - loss: 0.6730 - val_accuracy: 0.7636 - val_loss: 0.7289
Epoch 19/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7707 - loss: 0.6490 - val_accuracy: 0.7462 - val_loss: 0.7660
Epoch 20/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7679 - loss: 0.6578 - val_accuracy: 0.7500 - val_loss: 0.7540
Epoch 21/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7692 - loss: 0.6578 - val_accuracy: 0.7576 - val_loss: 0.7311
Epoch 22/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7779 - loss: 0.6332 - val_accuracy: 0.7550 - val_loss: 0.7497
Epoch 23/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7739 - loss: 0.6429 - val_accuracy: 0.7780 - val_loss: 0.6810
Epoch 24/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7815 - loss: 0.6220 - val_accuracy: 0.7632 - val_loss: 0.7106
Epoch 25/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7837 - loss: 0.6167 - val_accuracy: 0.7832 - val_loss: 0.6634
Epoch 26/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7837 - loss: 0.6185 - val_accuracy: 0.7834 - val_loss: 0.6825
Epoch 27/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7829 - loss: 0.6160 - val_accuracy: 0.7592 - val_loss: 0.7125
Epoch 28/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7852 - loss: 0.6067 - val_accuracy: 0.7552 - val_loss: 0.7134
Epoch 29/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7888 - loss: 0.6034 - val_accuracy: 0.7800 - val_loss: 0.6740
Epoch 30/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7878 - loss: 0.6094 - val_accuracy: 0.7654 - val_loss: 0.7015
Epoch 31/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7914 - loss: 0.5946 - val_accuracy: 0.7808 - val_loss: 0.6864
Epoch 32/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7933 - loss: 0.5873 - val_accuracy: 0.7788 - val_loss: 0.6812
Epoch 33/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7977 - loss: 0.5739 - val_accuracy: 0.7798 - val_loss: 0.6835
Epoch 34/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7975 - loss: 0.5786 - val_accuracy: 0.7620 - val_loss: 0.7110
Epoch 35/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7977 - loss: 0.5724 - val_accuracy: 0.7726 - val_loss: 0.6721
Epoch 36/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8007 - loss: 0.5736 - val_accuracy: 0.7898 - val_loss: 0.6473
Epoch 37/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7977 - loss: 0.5753 - val_accuracy: 0.7902 - val_loss: 0.6538
Epoch 38/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7965 - loss: 0.5781 - val_accuracy: 0.7752 - val_loss: 0.6643
Epoch 39/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8017 - loss: 0.5621 - val_accuracy: 0.7694 - val_loss: 0.6887
Epoch 40/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.8009 - loss: 0.5665 - val_accuracy: 0.7960 - val_loss: 0.6322
Epoch 41/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8069 - loss: 0.5538 - val_accuracy: 0.7758 - val_loss: 0.6582
Epoch 42/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8054 - loss: 0.5549 - val_accuracy: 0.7666 - val_loss: 0.6899
Epoch 43/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.8070 - loss: 0.5421 - val_accuracy: 0.7742 - val_loss: 0.6604
Epoch 44/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.8017 - loss: 0.5637 - val_accuracy: 0.7684 - val_loss: 0.6863
Epoch 45/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 8ms/step - accuracy: 0.8090 - loss: 0.5405 - val_accuracy: 0.7876 - val_loss: 0.6454
Epoch 46/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.8101 - loss: 0.5397 - val_accuracy: 0.7748 - val_loss: 0.6704
Epoch 47/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8046 - loss: 0.5550 - val_accuracy: 0.7868 - val_loss: 0.6371
Epoch 48/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8070 - loss: 0.5503 - val_accuracy: 0.7582 - val_loss: 0.7109
Epoch 49/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 0.8097 - loss: 0.5362 - val_accuracy: 0.7664 - val_loss: 0.6985
Epoch 50/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.8115 - loss: 0.5355 - val_accuracy: 0.7766 - val_loss: 0.6620
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.7764 - loss: 0.6758
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.78      0.82      0.80      1000
           1       0.91      0.89      0.90      1000
           2       0.77      0.61      0.68      1000
           3       0.64      0.57      0.60      1000
           4       0.66      0.82      0.74      1000
           5       0.76      0.64      0.69      1000
           6       0.68      0.92      0.78      1000
           7       0.85      0.78      0.81      1000
           8       0.86      0.90      0.88      1000
           9       0.90      0.79      0.84      1000

    accuracy                           0.77     10000
   macro avg       0.78      0.77      0.77     10000
weighted avg       0.78      0.77      0.77     10000

Accuracy Score: 0.7744
Root Mean Square Error: 1.9171854370404549
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 18s 16ms/step - accuracy: 0.3264 - loss: 1.8086 - val_accuracy: 0.5510 - val_loss: 1.4450
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5338 - loss: 1.3022 - val_accuracy: 0.6094 - val_loss: 1.3131
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5999 - loss: 1.1358 - val_accuracy: 0.6434 - val_loss: 1.1751
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6345 - loss: 1.0468 - val_accuracy: 0.6546 - val_loss: 1.1510
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.6616 - loss: 0.9697 - val_accuracy: 0.6934 - val_loss: 1.0043
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.6825 - loss: 0.9149 - val_accuracy: 0.7048 - val_loss: 1.0322
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6897 - loss: 0.8916 - val_accuracy: 0.7180 - val_loss: 0.9229
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7043 - loss: 0.8480 - val_accuracy: 0.7008 - val_loss: 0.9386
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 8ms/step - accuracy: 0.7126 - loss: 0.8210 - val_accuracy: 0.7184 - val_loss: 0.9051
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.7234 - loss: 0.7966 - val_accuracy: 0.7110 - val_loss: 0.9234
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7247 - loss: 0.7858 - val_accuracy: 0.6978 - val_loss: 0.9289
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7228 - loss: 0.7845 - val_accuracy: 0.7200 - val_loss: 0.8486
Epoch 13/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7389 - loss: 0.7503 - val_accuracy: 0.7552 - val_loss: 0.7911
Epoch 14/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7444 - loss: 0.7399 - val_accuracy: 0.7364 - val_loss: 0.8214
Epoch 15/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7487 - loss: 0.7163 - val_accuracy: 0.7402 - val_loss: 0.7999
Epoch 16/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7518 - loss: 0.7114 - val_accuracy: 0.7534 - val_loss: 0.7547
Epoch 17/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7580 - loss: 0.7002 - val_accuracy: 0.7518 - val_loss: 0.7744
Epoch 18/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7557 - loss: 0.6932 - val_accuracy: 0.7474 - val_loss: 0.7664
Epoch 19/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7592 - loss: 0.6844 - val_accuracy: 0.7254 - val_loss: 0.7965
Epoch 20/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7567 - loss: 0.6917 - val_accuracy: 0.7638 - val_loss: 0.7454
Epoch 21/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7696 - loss: 0.6685 - val_accuracy: 0.7372 - val_loss: 0.7930
Epoch 22/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7703 - loss: 0.6600 - val_accuracy: 0.7390 - val_loss: 0.7770
Epoch 23/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7673 - loss: 0.6645 - val_accuracy: 0.7590 - val_loss: 0.7368
Epoch 24/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7647 - loss: 0.6716 - val_accuracy: 0.7620 - val_loss: 0.7242
Epoch 25/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7690 - loss: 0.6609 - val_accuracy: 0.7610 - val_loss: 0.7308
Epoch 26/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7745 - loss: 0.6380 - val_accuracy: 0.7474 - val_loss: 0.7672
Epoch 27/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7797 - loss: 0.6291 - val_accuracy: 0.7600 - val_loss: 0.7399
Epoch 28/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7699 - loss: 0.6482 - val_accuracy: 0.7372 - val_loss: 0.7669
Epoch 29/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7792 - loss: 0.6261 - val_accuracy: 0.7330 - val_loss: 0.7876
Epoch 30/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7793 - loss: 0.6395 - val_accuracy: 0.7608 - val_loss: 0.7217
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - accuracy: 0.7658 - loss: 0.7322
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.83      0.76      0.79      1000
           1       0.93      0.83      0.88      1000
           2       0.78      0.56      0.65      1000
           3       0.60      0.62      0.61      1000
           4       0.69      0.79      0.73      1000
           5       0.71      0.68      0.69      1000
           6       0.64      0.92      0.76      1000
           7       0.87      0.74      0.80      1000
           8       0.77      0.92      0.84      1000
           9       0.90      0.79      0.84      1000

    accuracy                           0.76     10000
   macro avg       0.77      0.76      0.76     10000
weighted avg       0.77      0.76      0.76     10000

Accuracy Score: 0.7596
Root Mean Square Error: 1.9875110062588333
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 17s 15ms/step - accuracy: 0.3270 - loss: 1.8179 - val_accuracy: 0.5284 - val_loss: 1.4974
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5422 - loss: 1.2966 - val_accuracy: 0.6106 - val_loss: 1.3057
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5996 - loss: 1.1316 - val_accuracy: 0.6304 - val_loss: 1.2079
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6403 - loss: 1.0304 - val_accuracy: 0.6922 - val_loss: 1.0831
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 0.6636 - loss: 0.9606 - val_accuracy: 0.6594 - val_loss: 1.0580
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.6830 - loss: 0.9016 - val_accuracy: 0.6916 - val_loss: 1.0090
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6942 - loss: 0.8743 - val_accuracy: 0.7208 - val_loss: 0.9301
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 11s 8ms/step - accuracy: 0.7059 - loss: 0.8349 - val_accuracy: 0.7188 - val_loss: 0.8893
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7146 - loss: 0.8142 - val_accuracy: 0.7294 - val_loss: 0.8707
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7250 - loss: 0.7892 - val_accuracy: 0.7430 - val_loss: 0.8200
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7268 - loss: 0.7790 - val_accuracy: 0.7172 - val_loss: 0.8401
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7339 - loss: 0.7594 - val_accuracy: 0.7124 - val_loss: 0.8621
Epoch 13/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7416 - loss: 0.7418 - val_accuracy: 0.7498 - val_loss: 0.7859
Epoch 14/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7437 - loss: 0.7360 - val_accuracy: 0.7332 - val_loss: 0.8279
Epoch 15/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7488 - loss: 0.7135 - val_accuracy: 0.7564 - val_loss: 0.7776
Epoch 16/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7581 - loss: 0.6923 - val_accuracy: 0.7498 - val_loss: 0.7587
Epoch 17/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.7586 - loss: 0.6888 - val_accuracy: 0.7668 - val_loss: 0.7592
Epoch 18/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7610 - loss: 0.6835 - val_accuracy: 0.7616 - val_loss: 0.7581
Epoch 19/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7678 - loss: 0.6665 - val_accuracy: 0.7404 - val_loss: 0.7765
Epoch 20/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7651 - loss: 0.6658 - val_accuracy: 0.7240 - val_loss: 0.7936
Epoch 21/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7690 - loss: 0.6599 - val_accuracy: 0.7524 - val_loss: 0.7255
Epoch 22/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7666 - loss: 0.6530 - val_accuracy: 0.7076 - val_loss: 0.8376
Epoch 23/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7688 - loss: 0.6583 - val_accuracy: 0.7480 - val_loss: 0.7411
Epoch 24/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7791 - loss: 0.6264 - val_accuracy: 0.7592 - val_loss: 0.7257
Epoch 25/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7819 - loss: 0.6288 - val_accuracy: 0.7488 - val_loss: 0.7461
Epoch 26/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7771 - loss: 0.6299 - val_accuracy: 0.7540 - val_loss: 0.7398
Epoch 27/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7846 - loss: 0.6197 - val_accuracy: 0.7622 - val_loss: 0.7106
313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step
313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.7633 - loss: 0.7172
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
Classification Report
              precision    recall  f1-score   support

           0       0.82      0.79      0.80      1000
           1       0.84      0.90      0.87      1000
           2       0.78      0.57      0.66      1000
           3       0.65      0.57      0.61      1000
           4       0.64      0.81      0.71      1000
           5       0.68      0.71      0.69      1000
           6       0.66      0.92      0.77      1000
           7       0.89      0.72      0.80      1000
           8       0.85      0.88      0.87      1000
           9       0.93      0.74      0.82      1000

    accuracy                           0.76     10000
   macro avg       0.77      0.76      0.76     10000
weighted avg       0.77      0.76      0.76     10000

Accuracy Score: 0.7613
Root Mean Square Error: 1.9528952864913163
/usr/local/lib/python3.11/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 18s 15ms/step - accuracy: 0.3028 - loss: 1.8737 - val_accuracy: 0.5142 - val_loss: 1.5717
Epoch 2/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5341 - loss: 1.3174 - val_accuracy: 0.5884 - val_loss: 1.3480
Epoch 3/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.5887 - loss: 1.1623 - val_accuracy: 0.6482 - val_loss: 1.1654
Epoch 4/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6281 - loss: 1.0606 - val_accuracy: 0.6596 - val_loss: 1.1163
Epoch 5/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6576 - loss: 0.9852 - val_accuracy: 0.6712 - val_loss: 1.0376
Epoch 6/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6744 - loss: 0.9352 - val_accuracy: 0.6568 - val_loss: 1.0513
Epoch 7/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.6851 - loss: 0.9023 - val_accuracy: 0.7182 - val_loss: 0.9038
Epoch 8/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.6969 - loss: 0.8719 - val_accuracy: 0.6928 - val_loss: 0.9213
Epoch 9/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7036 - loss: 0.8447 - val_accuracy: 0.7210 - val_loss: 0.8893
Epoch 10/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 6s 7ms/step - accuracy: 0.7132 - loss: 0.8057 - val_accuracy: 0.7326 - val_loss: 0.8508
Epoch 11/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.7223 - loss: 0.7899 - val_accuracy: 0.7344 - val_loss: 0.8349
Epoch 12/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7297 - loss: 0.7753 - val_accuracy: 0.7074 - val_loss: 0.8774
Epoch 13/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7345 - loss: 0.7617 - val_accuracy: 0.7410 - val_loss: 0.8119
Epoch 14/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7409 - loss: 0.7407 - val_accuracy: 0.7570 - val_loss: 0.7663
Epoch 15/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.7478 - loss: 0.7206 - val_accuracy: 0.7380 - val_loss: 0.8277
Epoch 16/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7446 - loss: 0.7235 - val_accuracy: 0.7474 - val_loss: 0.7781
Epoch 17/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7554 - loss: 0.7070 - val_accuracy: 0.7550 - val_loss: 0.7683
Epoch 18/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - accuracy: 0.7559 - loss: 0.6976 - val_accuracy: 0.7576 - val_loss: 0.7593
Epoch 19/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 10s 7ms/step - accuracy: 0.7568 - loss: 0.6935 - val_accuracy: 0.7264 - val_loss: 0.7968
Epoch 20/200
704/704 ━━━━━━━━━━━━━━━━━━━━ 5s 7ms/step - accuracy: 0.7607 - loss: 0.6802 - val_accuracy: 0.7316 - val_loss: 0.8204
Epoch 21/200
478/704 ━━━━━━━━━━━━━━━━━━━━ 1s 6ms/step - accuracy: 0.7626 - loss: 0.6745
In [25]:
patience_data_6_df = pd.DataFrame(patience_data_6)
patience_data_6_df
Out[25]:
model accuracy val_accuracy test_accuracy loss val_loss test_loss time
0 DNN 0.760 0.719 0.719 0.686 0.815 0.836 100.986
1 CNN_DO_MP_DO_X3_64_128_256-Pat-6 0.790 0.759 0.759 0.596 0.703 0.717 174.421
2 CNN_DO_MP_DO_X3_64_128_256-Pat-6 0.780 0.761 0.760 0.636 0.726 0.747 157.604
3 CNN_DO_MP_DO_X3_64_128_256-Pat-6 0.792 0.769 0.762 0.591 0.688 0.712 205.932
4 CNN_DO_MP_DO_X3_64_128_256-Pat-6 0.770 0.759 0.757 0.662 0.753 0.767 118.747
5 CNN_DO_MP_DO_X3_64_128_256-Pat-6 0.776 0.762 0.756 0.638 0.715 0.728 149.547
6 CNN_DO_MP_DO_X3_64_128_256-Pat-6 0.771 0.769 0.767 0.651 0.703 0.716 128.475
7 CNN_DO_MP_DO_X3_64_128_256-Pat-6 0.776 0.760 0.752 0.652 0.722 0.732 127.677
8 CNN_DO_MP_DO_X3_64_128_256-Pat-6 0.766 0.778 0.766 0.665 0.722 0.744 117.796
9 CNN_DO_MP_DO_X3_64_128_256-Pat-6 0.780 0.760 0.758 0.626 0.730 0.752 163.596
In [ ]:
patience_data_8_df = pd.DataFrame(patience_data_8)
patience_data_8_df
In [ ]:
patience_data_10_df = pd.DataFrame(patience_data_10)
patience_data_10_df
In [ ]:
#patience_data_6_df.drop('model', axis=1, inplace=True)
patience_data_8_df.drop('model', axis=1, inplace=True)
# patience_data_10_df.drop('model', axis=1, inplace=True)
In [27]:
patience_data_6_df.astype('float').describe()
Out[27]:
accuracy val_accuracy test_accuracy loss val_loss test_loss time
count 10.000000 10.000000 10.000000 10.000000 10.000000 10.000000 10.000000
mean 0.776100 0.759600 0.755600 0.640300 0.727700 0.745100 144.478100
std 0.010005 0.015536 0.013624 0.029893 0.035421 0.036501 31.694022
min 0.760000 0.719000 0.719000 0.591000 0.688000 0.712000 100.986000
25% 0.770250 0.759250 0.756250 0.628500 0.706000 0.719750 120.979500
50% 0.776000 0.760500 0.758500 0.644500 0.722000 0.738000 139.011000
75% 0.780000 0.767250 0.761500 0.659500 0.729000 0.750750 162.098000
max 0.792000 0.778000 0.767000 0.686000 0.815000 0.836000 205.932000
In [ ]:
patience_data_8_df.astype('float').describe()
In [ ]:
patience_data_10_df.drop('model', axis=1, inplace=True)
patience_data_10_df.astype('float').describe()
In [ ]: